@Article{info:doi/10.2196/67111, author="Khairat, Saif and Morelli, Jennifer and Liao, Wan-Ting and Aucoin, Julia and Edson, S. Barbara and Jones, B. Cheryl", title="Association of Virtual Nurses' Workflow and Cognitive Fatigue During Inpatient Encounters: Cross-Sectional Study", journal="JMIR Hum Factors", year="2025", month="Apr", day="22", volume="12", pages="e67111", keywords="virtual nursing", keywords="telemedicine", keywords="cognitive fatigue", keywords="eye-tracking technology", keywords="eye tracking", keywords="eye", keywords="nursing", keywords="virtual", keywords="cross-sectional study", keywords="workflow", keywords="inpatient", keywords="fatigue", keywords="pupil size", keywords="pupil", keywords="tracking", keywords="USA", keywords="United States", keywords="design", keywords="cognitive", keywords="virtual care", keywords="nurse", keywords="delivery model", keywords="technology", keywords="communication", keywords="EHR", keywords="electronic health record", keywords="virtual nurse", abstract="Background: The virtual nursing delivery model enables the provision of expert nursing care from a remote location, using technology such as audio and video communication, remote monitoring devices, and access to electronic health records. Virtual nurses spend an extensive amount of time on computers to provide care, and little is known about how this workflow may affect and contribute to cognitive fatigue. Objective: This study aimed to use eye tracking technology and pupil size variation to determine instances of virtual nurse cognitive fatigue during their typical workflow. Methods: This study examined the virtual nursing workflow by recording and analyzing virtual nurse encounters using eye tracking. This cross-sectional study was conducted during regular 12-hour shifts at a major Southeastern health center in the United States. 
Results: The study found that 75\% (22/29) of virtual nursing encounters demonstrated a first fatigue instance at 9.8 minutes during patient discharges and at 11.9 minutes during patient admissions. Conclusions: This study provides valuable insights into virtual nursing workflow design and how it may impact the cognitive fatigue levels of nurses providing inpatient virtual care. ", doi="10.2196/67111", url="https://humanfactors.jmir.org/2025/1/e67111" } @Article{info:doi/10.2196/59511, author="Baglivo, Francesco and De Angelis, Luigi and Vannini, Federico and Agostini, Antonello and Todaro, Antonio and Torri, Eleonora and Gianolio Lopez, Alberto Giulio and Fui, Margherita and Tomasi, Alberto and Rizzo, Caterina", title="Italian Medical Professionals' Practices, Attitudes, and Knowledge in Travel Medicine: Protocol for a National Survey", journal="JMIR Res Protoc", year="2025", month="Apr", day="21", volume="14", pages="e59511", keywords="travel medicine", keywords="Italy", keywords="cross-sectional", keywords="survey", keywords="KAP", keywords="medical professional", keywords="medical professionals", keywords="Italian", keywords="global health", keywords="epidemiology", keywords="scoping review", keywords="power analysis", keywords="dissemination", keywords="healthcare service", keywords="healthcare services", keywords="survey protocol", keywords="awareness", keywords="assessment", abstract="Background: The evolving global health landscape highlights the importance of travel medicine, making it necessary for health care professionals to understand the epidemiologic profiles among varied traveler populations and keep themselves updated in this rapidly changing field. However, in Italy, travel medicine clinics have significant gaps in resource allocation, staff training, and infrastructure. 
Objective: This protocol of a cross-sectional study aims to create and validate a questionnaire to assess the knowledge, attitudes, and practices of health care professionals in travel medicine in Italy. The final goal is to provide a tool to evaluate the state of travel medicine, guide training initiatives, and be able to monitor trends over time. Methods: The study population consists of health care professionals who practice travel medicine in Italy. The questionnaire will be developed by adapting an existing English survey and conducting a scoping review to align the questionnaire with contemporary scientific discourse. The validation process includes face validity, content validity, and expert evaluation. The sample size, determined through power analysis, ranges from 218 to 278 participants. The questionnaire will undergo a pilot test on a smaller sample size (10\% of the total) to identify and address any issues. Statistical analysis will include central tendency and dispersion measures, categorical summaries, group comparisons, and regressions. This research received ethical approval, and informed consent will be obtained from all participants. Results: As of July 2024, we completed the questionnaire validation involving 9 experts. The validated version of the questionnaire includes 86 items. Furthermore, we conducted a pilot test on 53 individuals during the SIMVIM (Italian Society of Travel Medicine and Migrations) course on travel medicine held in Lucca, Italy, on June 14, 2024. Conclusions: This cross-sectional study will guide strategic planning and targeting training and awareness activities in areas deemed most critical or lacking. The study's structured approach and periodic assessments will facilitate the identification of educational gaps, the dissemination of best practices, and the overall improvement of health care services for travelers in Italy. 
International Registered Report Identifier (IRRID): DERR1-10.2196/59511 ", doi="10.2196/59511", url="https://www.researchprotocols.org/2025/1/e59511" } @Article{info:doi/10.2196/56860, author="Low, Yun Shi and Ko, Qianwen Stephanie and Ang, Han Ian Yi", title="Health Care Providers' Experiences and Perceptions With Telehealth Tools in a Hospital-at-Home Program: Mixed Methods Study", journal="JMIR Hum Factors", year="2025", month="Apr", day="17", volume="12", pages="e56860", keywords="telehealth usability", keywords="hospital-at-home", keywords="health care provider experience", keywords="virtual consultation", keywords="vital signs monitoring", keywords="mixed-methods study", keywords="health care provider", keywords="experience", keywords="perception", keywords="telehealth tools", keywords="telehealth", keywords="e-consultations", keywords="teleconsultation", keywords="hospital-based", keywords="home-based", keywords="mobile phone", abstract="Background: The growing demand for hospital-based care, driven by aging populations and constrained resources, has accelerated the adoption of telehealth tools such as teleconsultations and remote monitoring in hospital-at-home (HaH) programs. Despite their increasing use in delivering acute care at home, studies exploring health care providers' experiences and perceptions of these tools within HaH settings remain limited. Objective: This study aimed to understand the experiences and perspectives of health care providers toward teleconsultations and vital signs monitoring systems within a HaH program in Singapore to optimize effectiveness and address challenges in future implementation. Methods: A convergent mixed methods approach that combines qualitative in-depth interviews with an electronic survey designed based on the 5 domains (usefulness, ease of use, effectiveness, reliability, and satisfaction) of the Telehealth Usability Questionnaire was used. Results: In total, 37 surveys and 20 interviews were completed. 
Participants responded positively to the use of both teleconsultation and vital signs monitoring with a mean total score of each method being 4.55 (SD 0.44) and 4.52 (SD 0.42), respectively. Significantly higher mean ratings were observed among doctors compared with other health care providers for usefulness (P=.03) and ease of use (P=.047) in teleconsultations. Health care providers with fewer years of clinical experience also perceived the use of vital signs monitoring to be more effective (P=.02) and more usable (P=.04) than those with more years of experience. Qualitative analysis identified four themes: (1) benefits of telehealth for health care providers such as improved work convenience, efficiency, and satisfaction; (2) challenges of telehealth implementation relating to communication and technology; (3) perspectives on telehealth impact; and (4) enablers for successful implementation. Comparing both datasets, qualitative findings were aligned with and confirmed quantitative results. Conclusion: This study highlighted the benefits and usability of telehealth among health care providers. However, challenges relating to patient communication, technological issues, and delivery of care were also discussed along with enablers for successful implementation. These insights can inform strategies to optimize future implementation of telehealth in HaH. 
", doi="10.2196/56860", url="https://humanfactors.jmir.org/2025/1/e56860" } @Article{info:doi/10.2196/67263, author="Gil-Hern{\'a}ndez, Eva and Carrillo, Irene and Martin-Delgado, Jimmy and Garc{\'i}a-Torres, Daniel and Mira, Joaqu{\'i}n Jos{\'e}", title="Development of a Web-Based Intervention for Middle Managers to Enhance Resilience at the Individual, Team, and Organizational Levels in Health Care Systems: Multiphase Study", journal="JMIR Hum Factors", year="2025", month="Feb", day="5", volume="12", pages="e67263", keywords="resilience", keywords="health care professionals", keywords="web-based intervention", keywords="middle management", keywords="well-being", keywords="patient safety", abstract="Background: Health care institutions face high systemic risk due to the inherent uncertainty and complexity of their operations. This often leads to stressful incidents impacting the well-being of health care professionals, which can compromise the effectiveness of health care systems. Enhancing resilience among health care professionals is essential for maintaining high-quality care and ensuring patient safety. The role of middle managers is essential to ensure the response capacity of individuals and teams. Objective: This study aims to develop a web-based intervention aimed at middle management to enhance individual, team, and organizational resilience. Methods: An observational study was conducted in 3 phases: design, validation, and pilot study. The study was initiated in February 2022 and concluded in June 2023. Phase 1 involved designing the content for the web-based tool based on a comprehensive review of critical elements around resilience. Phase 2 included validation by an international panel of experts who reviewed the tool and rated it according to a structured grid. They were also encouraged to highlight strengths and areas for improvement. 
Phase 3 involved piloting the tool with health care professionals in Ecuador to refine the platform and assess its effectiveness. A total of 458 people were invited to participate through the Institutional Course on Continuous Improvement in Health Care Quality and Safety offered by the Ministry of Public Health of Ecuador. Results: The tool, eResiliencia, was structured into 2 main blocks: individual and team resilience and organizational resilience. It included videos, images, PDFs, and links to dynamic graphics and additional texts. Furthermore, 13 (65\%) of the 20 experts validated the tool, rating content clarity at an average of 4.5 (SD 0.7) and utility at an average of 4.7 (SD 0.5) out of 5. The average overall satisfaction was 9.3 (SD 0.6) out of 10 points, and feedback on improvements was implemented. A total of 362 health care professionals began the intervention, of which 218 (60.2\%) completed preintervention and postintervention questionnaires, with significant knowledge increases (P<.001). Of the 362 health care professionals, 146 (40.3\%) completed the satisfaction questionnaire, where overall satisfaction was rated at an average of 9.4 (SD 1.1) out of 10 points. Conclusions: The eResiliencia web-based platform provides middle managers with resources to enhance resilience among their teams and their components, promoting better well-being and performance, even under highly stressful events. Future research should focus on long-term impacts and practical applications in diverse clinical settings. 
", doi="10.2196/67263", url="https://humanfactors.jmir.org/2025/1/e67263" } @Article{info:doi/10.2196/58077, author="Kelly, Charlotte Sofia and Wegener, Kauffeldt Emilie and Kayser, Lars", title="Creation of Text Vignettes Based on Patient-Reported Data to Facilitate a Better Understanding of the Patient Perspective: Design Study", journal="JMIR Hum Factors", year="2025", month="Feb", day="5", volume="12", pages="e58077", keywords="patient-reported outcome", keywords="text vignette", keywords="data representation", keywords="Readiness and Enablement Index for Health Technology", keywords="understanding", keywords="health care system", keywords="data analysis", keywords="clinical training", keywords="clinician", keywords="health professional", abstract="Background: Patient-reported outcome (PRO) data refer to information systematically reported by patients, or on behalf of patients, without the influence of health care professionals. It is a focal point of the health care system's ambition toward becoming more involving and personalized. It is recognized that PROs provide valuable data. However, despite this recognition, there are challenges related to both patients' and clinicians' accurate interpretations of the quantitative data. To overcome these challenges, this study explores text vignettes as a representation of PROs. Objective: This study aimed to develop data-informed text vignettes based on data from the Readiness and Enablement Index for Health Technology (READHY) instrument as another way of representing PRO data and to examine how these are perceived as understandable and relevant for both patients and clinicians. Methods: The text vignettes were created from participant responses to the READHY instrument, which encompasses health literacy, health education, and eHealth literacy. The text vignettes were created from 13 individual text strings, each corresponding to a scale in the READHY instrument. This study consisted of 3 sequential parts. 
In part 1, individuals with chronic obstructive pulmonary disease completed the READHY instrument, providing data to be used to create vignettes based on cluster profiles from the READHY instrument. Part 2 focused on the development of scale-based strings representing all READHY dimensions, which were evaluated through iterative cognitive interviews. In part 3, clinicians and patients assessed the understanding and relevance of the text vignettes. Results: Clinicians and patients both understood and related to the text vignettes. Patients viewed the text vignettes as an accurate reflection of their PRO responses, and clinicians perceived the text vignettes as aligned with their understanding of patients' experiences. Conclusions: Text vignettes can be developed using PRO instruments, with individual scales as input strings. This provides an opportunity to present numeric values in a text format that is understandable and recognizable to most patients and clinicians. Challenges with the vignette's language and layout require customization and clinician training to ensure meaningful interpretation. Findings also support the need to expand the study and enhance clinical relevance with alternative or contextually relevant text vignettes. 
", doi="10.2196/58077", url="https://humanfactors.jmir.org/2025/1/e58077" } @Article{info:doi/10.2196/53630, author="Sutan, Rosnah and Ismail, Shahida and Ibrahim, Roszita", title="Evaluating the Development, Reliability, and Validation of the Tele-Primary Care Oral Health Clinical Information System Questionnaire: Cross-Sectional Questionnaire Study", journal="JMIR Hum Factors", year="2025", month="Jan", day="29", volume="12", pages="e53630", keywords="telehealth", keywords="electronic health", keywords="eHealth", keywords="public health information system", keywords="psychometric analysis", abstract="Background: Evaluating digital health service delivery in primary health care requires a validated questionnaire to comprehensively assess users' ability to implement tasks customized to the program's needs. Objective: This study aimed to develop, test the reliability of, and validate the Tele-Primary Care Oral Health Clinical Information System (TPC-OHCIS) questionnaire for evaluating the implementation of maternal and child digital health information systems. Methods: A cross-sectional study was conducted in 2 phases. The first phase focused on content item development and was validated by a group of 10 experts using the content validity index. The second phase was to assess its psychometric testing for reliability and validity. Results: A structured questionnaire of 65 items was constructed to assess the TPC-OHCIS delivery for primary health care use based on literature and has been validated by 10 experts, and 319 respondents answered the 65-item TPC-OHCIS questionnaire, with mean item scores ranging from 1.99 (SD 0.67) to 2.85 (SD 1.019). The content validity, reliability, and face validity showed a scale-level content validity index of 0.90, scale-level content validation ratio of 0.90, and item-level face validity index of 0.76, respectively. 
The internal reliability was calculated as a Cronbach $\alpha$ value of 0.90, with an intraclass correlation coefficient of 0.91. Scales were determined by the scree plot with eigenvalues >1, and 13 subscales were identified based on principal component analysis. The Kaiser-Meyer-Olkin value was 0.90 (P<.049). The total variance explained was 76.07\%, and factor loading scores for all variables were >0.7. The Bartlett test of sphericity, determining construct validity, was found to be significant (P<.049). Conclusions: The TPC-OHCIS questionnaire is valid to be used at the primary health care level to evaluate the TPC-OHCIS implementation. It can assess health care workers' work performance and job acceptance and improve the quality of care. ", doi="10.2196/53630", url="https://humanfactors.jmir.org/2025/1/e53630" } @Article{info:doi/10.2196/64210, author="{\AA}vik Persson, Helene and Castor, Charlotte and Andersson, Nilla and Hyl{\'e}n, Mia", title="Swedish Version of the System Usability Scale: Translation, Adaption, and Psychometric Evaluation", journal="JMIR Hum Factors", year="2025", month="Jan", day="16", volume="12", pages="e64210", keywords="application", keywords="Swedish", keywords="System Usability Scale", keywords="usability", keywords="validation", abstract="Background: The Swedish health care system is undergoing a transformation. eHealth technologies are increasingly being used. The System Usability Scale is a widely used tool, offering a standardized and reliable measure for assessing the usability of digital health solutions. However, despite the existence of several translations of the System Usability Scale into Swedish, none have undergone psychometric validation. This highlights the urgent need for a validated and standardized Swedish version of the System Usability Scale to ensure accurate and reliable usability evaluations. 
Objective: The aim of the study was to translate and psychometrically evaluate a Swedish version of the System Usability Scale. Methods: The study utilized a 2-phase design. The first phase translated the System Usability Scale into Swedish and the second phase tested the scale's psychometric properties. A total of 62 participants generated a total of 82 measurements. Descriptive statistics were used to visualize participants' characteristics. The psychometric evaluation consisted of data quality, scaling assumptions, and acceptability. Construct validity was evaluated by convergent validity, and reliability was evaluated by internal consistency. Results: The Swedish version of the System Usability Scale demonstrated high conformity with the original version. The scale showed high internal consistency with a Cronbach $\alpha$ of .852 and corrected item-total correlations ranging from 0.454 to 0.731. The construct validity was supported by a significant positive correlation between the System Usability Scale and domain 5 of the eHealth Literacy Questionnaire (P=.001). Conclusions: The Swedish version of the System Usability Scale demonstrated satisfactory psychometric properties. It can be recommended for use in a Swedish context. The positive correlation with domain 5 of the eHealth Literacy Questionnaire further supports the construct validity of the Swedish version of the System Usability Scale, affirming its suitability for evaluating digital health solutions. Additional tests of the Swedish version of the System Usability Scale, for example, in the evaluation of more complex eHealth technology, would further validate the scale. Trial Registration: ClinicalTrials.gov NCT04150120; https://clinicaltrials.gov/study/NCT04150120 ", doi="10.2196/64210", url="https://humanfactors.jmir.org/2025/1/e64210" } @Article{info:doi/10.2196/52498, author="DeLange Martinez, Pauline and Tancredi, Daniel and Pavel, Misha and Garcia, Lorena and Young, M. 
Heather", title="Technology Acceptance Among Low-Income Asian American Older Adults: Cross-Sectional Survey Analysis", journal="J Med Internet Res", year="2024", month="Nov", day="22", volume="26", pages="e52498", keywords="aged", keywords="older adults", keywords="Asian American", keywords="immigrant", keywords="vulnerable populations", keywords="internet", keywords="information and communications technology", keywords="ICT", keywords="digital divide", keywords="technology acceptance model", keywords="mobile phone", abstract="Background: Studies show that the use of information and communications technologies (ICTs), including smartphones, tablets, computers, and the internet, varies by demographic factors such as age, gender, and educational attainment. However, the connections between ICT use and factors such as ethnicity and English proficiency, especially among Asian American older adults, remain less explored. The technology acceptance model (TAM) suggests that 2 key attitudinal factors, perceived usefulness (PU) and perceived ease of use (PEOU), influence technology acceptance. While the TAM has been adapted for older adults in China, Taiwan, Singapore, and Korea, it has not been tested among Asian American older adults, a population that is heterogeneous and experiences language barriers in the United States. Objective: This study aims to examine the relationships among demographics (age, gender, educational attainment, ethnicity, and English proficiency), PU, PEOU, and ICT use among low-income Asian American older adults. Two outcomes were examined: smartphone use and ICT use, each measured by years of experience and current frequency of use. Methods: This was a secondary data analysis from a cross-sectional baseline survey of the Lighthouse Project, which provided free broadband, ICT devices, and digital literacy training to residents living in 8 affordable senior housing communities across California. 
This analysis focused on Asian participants aged $\geq$62 years (N=392), specifically those of Korean, Chinese, Vietnamese, Filipino, and other Asian ethnicities (eg, Hmong and Japanese). Hypotheses were examined using descriptive statistics, correlation analysis, and hierarchical regression analysis. Results: Younger age, higher education, and greater English proficiency were positively associated with smartphone use (age: $\beta$=--.202; P<.001; education: $\beta$=.210; P<.001; and English proficiency: $\beta$=.124; P=.048) and ICT use (age: $\beta$=--.157; P=.002; education: $\beta$=.215; P<.001; and English proficiency: $\beta$=.152; P=.01). Male gender was positively associated with PEOU ($\beta$=.111; P=.047) but not with PU ($\beta$=--.031; P=.59), smartphone use ($\beta$=.023; P=.67), or ICT use ($\beta$=.078; P=.16). Ethnicity was a significant predictor of PU (F4,333=5.046; P<.001), PEOU (F4,345=4.299; P=.002), and ICT use (F4,350=3.177; P=.01), with Chinese participants reporting higher levels than Korean participants, who were the reference group ($\beta$=.143; P=.007). PU and PEOU were positively correlated with each other (r=0.139, 95\% CI=0.037-0.237; P=.007), and both were significant predictors of smartphone use (PU: $\beta$=.158; P=.002 and PEOU: $\beta$=.166; P=.002) and ICT use (PU: $\beta$=.117; P=.02 and PEOU: $\beta$=0.22; P<.001), even when controlling for demographic variables. Conclusions: The findings support the use of the TAM among low-income Asian American older adults. In addition, ethnicity and English proficiency are significant predictors of smartphone and ICT use among this population. Future interventions should consider heterogeneity and language barriers of this population to increase technology acceptance and use. 
", doi="10.2196/52498", url="https://www.jmir.org/2024/1/e52498" } @Article{info:doi/10.2196/57771, author="Zimmermann, Jannik and Morf, Harriet and Scharf, Florian and Knitza, Johannes and Moeller, Heidi and Muehlensiepen, Felix and Nathrath, Michaela and Orlemann, Till and Voelker, Thomas and Deckers, Merlin", title="German Version of the Telehealth Usability Questionnaire and Derived Short Questionnaires for Usability and Perceived Usefulness in Health Care Assessment in Telehealth and Digital Therapeutics: Instrument Validation Study", journal="JMIR Hum Factors", year="2024", month="Nov", day="21", volume="11", pages="e57771", keywords="mHealth", keywords="mobile health", keywords="telehealth", keywords="usability", keywords="questionnaire validation", keywords="technology acceptance model", keywords="validity", keywords="questionnaire translation", keywords="Net Promoter Scale", keywords="NPS", keywords="usefulness", keywords="autoimmune chronic diseases", keywords="questionnaire", keywords="German", keywords="digital therapeutics", keywords="therapeutics", keywords="feasibility", abstract="Background: The exponential growth of telehealth is revolutionizing health care delivery, but its evaluation has not matched the pace of its uptake. Various forms of assessment, from single-item to more extensive questionnaires, have been used to assess telehealth and digital therapeutics and their usability. The most frequently used questionnaire is the ``Telehealth Usability Questionnaire'' (TUQ). The use of the TUQ is limited by its restricted availability in languages other than English and its feasibility. Objective: The aims of this study were to create a translated German TUQ version and to derive a short questionnaire for patients---``Telehealth Usability and Perceived Usefulness Short Questionnaire for patients'' (TUUSQ). Methods: As a first step, the original 21-item TUQ was forward and back-translated twice. 
In the second step, 13 TUQ items were selected for their suitability for the general evaluation of telehealth on the basis of expert opinion. These 13 items were surveyed between July 2022 and September 2023 in 4 studies with patients and family members of palliative care, as well as patients with chronic autoimmune diseases, evaluating 13 health care apps, including digital therapeutics and a telehealth system (n1=128, n2=220, n3=30, and n4=12). Psychometric exploratory factor analysis was conducted. Results: The analysis revealed that a parsimonious factor structure with 2 factors (``perceived usefulness in health care'' and ``usability'') is sufficient to describe the patient's perception. Consequently, the questionnaire could be shortened to 6 items without compromising its informativeness. Conclusions: We provide a linguistically precise German version of the TUQ for assessing the usability and perceived usefulness of telehealth. Beyond that, we supply a highly feasible shortened version that is versatile for general use in telehealth, mobile health, and digital therapeutics, which distinguishes between the 2 factors ``perceived usefulness in health care'' and ``usability'' in patients. 
Trial Registration: German Clinical Trials Register DRKS00030546; https://drks.de/search/de/trial/DRKS00030546 ", doi="10.2196/57771", url="https://humanfactors.jmir.org/2024/1/e57771" } @Article{info:doi/10.2196/60655, author="Craamer, Casper and Timmers, Thomas and Siebelt, Michiel and Kool, Bertijn Rudolf and Diekerhof, Carel and Caron, Jacob Jan and Gosens, Taco and van der Weegen, Walter", title="Completion Rate and Satisfaction With Online Computer-Assisted History Taking Questionnaires in Orthopedics: Multicenter Implementation Report", journal="JMIR Med Inform", year="2024", month="Nov", day="13", volume="12", pages="e60655", keywords="computer-assisted history taking", keywords="history taking", keywords="digital medical interview", keywords="orthopedics", keywords="digital health", keywords="computer-assisted", keywords="cohort study", keywords="orthopedic", keywords="outpatient", keywords="satisfaction", keywords="patient engagement", keywords="medical record", abstract="Background: Collecting the medical history during a first outpatient consultation plays an important role in making a diagnosis. However, it is a time-consuming process, and time is scarce in today's health care environment. The computer-assisted history taking (CAHT) systems allow patients to share their medical history electronically before their visit. Although multiple advantages of CAHT have been demonstrated, adoption in everyday medical practice remains low, which has been attributed to various barriers. Objective: This study aimed to implement a CAHT questionnaire for orthopedic patients in preparation for their first outpatient consultation and analyze its completion rate and added value. Methods: A multicenter implementation study was conducted in which all patients who were referred to the orthopedic department were invited to self-complete the CAHT questionnaire. The primary outcome of the study is the completion rate of the questionnaire. 
Secondary outcomes included patient and physician satisfaction. These were assessed via surveys and semistructured interviews. Implementation (Results): In total, 5321 patients were invited, and 4932 (92.7\%) fully completed the CAHT questionnaire between April 2022 and July 2022. On average, participants (n=224) rated the easiness of completing the questionnaire at 8.0 (SD 1.9; 0--10 scale) and the satisfaction of the consult at 8.0 (SD 1.7; 0--10 scale). Satisfaction with the outpatient consultation was higher in cases where the given answers were used by the orthopedic surgeon during this consultation (median 8.3, IQR 8.0--9.1 vs median 8.0, IQR 7.0--8.5; P<.001). Physicians (n=15) scored the average added value as 7.8 (SD 1.7; 0--10 scale) and unanimously recognized increased efficiency, better patient engagement, and better medical record completeness. Implementing the patient's answers into the electronic health record was deemed necessary. Conclusions: In this study, we have shown that previously recognized barriers to implementing and adapting CAHT can now be effectively overcome. We demonstrated that almost all patients completed the CAHT questionnaire. This results in reported improvements in both the efficiency and personalization of outpatient consultations. Given the pressing need for personalized health care delivery in today's time-constrained medical environment, we recommend implementing CAHT systems in routine medical practice. 
", doi="10.2196/60655", url="https://medinform.jmir.org/2024/1/e60655" } @Article{info:doi/10.2196/55140, author="Han, Tao and Wei, Qinpeng and Wang, Ruike and Cai, Yijin and Zhu, Hongyi and Chen, Jiani and Zhang, Zhiruo and Li, Sisi", title="Service Quality and Patient Satisfaction of Internet Hospitals in China: Cross-Sectional Evaluation With the Service Quality Questionnaire", journal="J Med Internet Res", year="2024", month="Nov", day="8", volume="26", pages="e55140", keywords="service quality", keywords="SERVQUAL", keywords="Service Quality Questionnaire", keywords="internet hospital", keywords="e-hospital", keywords="digital medical care", keywords="health care professionals", keywords="Chinese digital health care", abstract="Background: Internet hospitals, which refer to service platforms that integrate consultation, prescription, payment, and drug delivery based on hospital entities, have been developing at a rapid pace in China since 2014. However, assessments regarding their service quality and patient satisfaction have not been well developed. There is an urgent need to comprehensively evaluate and improve the service quality of internet hospitals. Objective: This study aims to investigate the current status of patients' use of internet hospitals, as well as familiarity and willingness to use internet hospitals, to evaluate patients' expected and perceived service qualities of internet hospitals using the Chinese version of the Service Quality Questionnaire (SERVQUAL-C) with a national representative sample, and to explore the association between service quality of internet hospitals and patients' overall satisfaction toward associated medical platforms. Methods: This cross-sectional survey was conducted through face-to-face or digital interviews from June to September 2022. A total of 1481 outpatient participants (635 men and 846 women; mean age 33.22, SD 13.22). 
Participants reported their use of internet hospitals, and then rated their expectations and perceptions of service quality toward internet hospitals via the SERVQUAL-C, along with their demographic information. Results: Among the surveyed participants, 51.2\% (n=758) of participants had used internet hospital service or services. Use varied across age, education level, and annual income. Although the majority of them (n=826, 55.8\%) did not know internet hospital services well, 68.1\% (n=1009) of participants expressed the willingness to adopt this service. Service quality evaluation revealed that the perceived service quality did not match with the expectation, especially the responsiveness dimension. Important-performance analysis results further alerted that reliable diagnosis, prompt response, clear feedback pathway, and active feedback handling were typically the services awaiting substantial improvement. More importantly, multiple linear regressions revealed that familiarity and willingness to use internet hospital services were significant predictors of satisfaction, above and over tangibles, reliability, and empathy service perspectives, and demographic characteristics such as gender, age, education level, and annual income. Conclusions: In the future, internet hospitals should focus more on how to narrow the gaps between the expected and perceived service quality. Promotion of internet hospitals should also be facilitated to increase patients' familiarity with and willingness to use these services. 
", doi="10.2196/55140", url="https://www.jmir.org/2024/1/e55140" } @Article{info:doi/10.2196/58079, author="Podda, Jessica and Grange, Erica and Susini, Alessia and Tacchino, Andrea and Di Antonio, Federica and Pedull{\`a}, Ludovico and Brichetto, Giampaolo and Ponzio, Michela", title="Italian Version of the mHealth App Usability Questionnaire (Ita-MAUQ): Translation and Validation Study in People With Multiple Sclerosis", journal="JMIR Hum Factors", year="2024", month="Sep", day="30", volume="11", pages="e58079", keywords="mHealth", keywords="multiple sclerosis", keywords="cognitive assessment", keywords="questionnaire validation", keywords="usability", keywords="mHealth app", keywords="mHealth application", keywords="validation study", keywords="MAUQ", keywords="app usability", keywords="telemedicine", keywords="disability", keywords="usability questionnaire", keywords="mobile health", abstract="Background: Telemedicine and mobile health (mHealth) apps have emerged as powerful tools in health care, offering convenient access to services and empowering participants in managing their health. Among populations with chronic and progressive disease such as multiple sclerosis (MS), mHealth apps hold promise for enhancing self-management and care. To be used in clinical practice, the validity and usability of mHealth tools should be tested. The most commonly used method for assessing the usability of electronic technologies are questionnaires. Objective: This study aimed to translate and validate the English version of the mHealth App Usability Questionnaire into Italian (ita-MAUQ) in a sample of people with MS. Methods: The 18-item mHealth App Usability Questionnaire was forward- and back-translated from English into Italian by an expert panel, following scientific guidelines for translation and cross-cultural adaptation. The ita-MAUQ (patient version for stand-alone apps) comprises 3 subscales, which are ease of use, interface and satisfaction, and usefulness. 
After interacting with DIGICOG-MS (Digital Assessment of Cognitive Impairment in Multiple Sclerosis), a novel mHealth app for cognitive self-assessment in MS, people completed the ita-MAUQ and the System Usability Scale, included to test construct validity of the translated questionnaire. Confirmatory factor analysis, internal consistency, test-retest reliability, and construct validity were assessed. Known-groups validity was examined based on disability levels as indicated by the Expanded Disability Status Scale (EDSS) score and gender. Results: In total, 116 people with MS (female n=74; mean age 47.2, SD 14 years; mean EDSS 3.32, SD 1.72) were enrolled. The ita-MAUQ demonstrated acceptable model fit, good internal consistency (Cronbach $\alpha$=0.92), and moderate test-retest reliability (intraclass coefficient correlation 0.84). Spearman coefficients revealed significant correlations between the ita-MAUQ total score; the ease of use (5 items), interface and satisfaction (7 items), and usefulness subscales; and the System Usability Scale (all P values <.05). Known-group analysis found no difference between people with MS with mild and moderate EDSS (all P values >.05), suggesting that ambulation ability, mainly detected by the EDSS, did not affect the ita-MAUQ scores. Interestingly, a statistical difference between female and male participants concerning the ease of use ita-MAUQ subscale was found (P=.02). Conclusions: The ita-MAUQ demonstrated high reliability and validity and it might be used to evaluate the usability, utility, and acceptability of mHealth apps in people with MS. ", doi="10.2196/58079", url="https://humanfactors.jmir.org/2024/1/e58079" } @Article{info:doi/10.2196/55852, author="Pohl, Petra and Klerfors, Karoline and Kj{\"o}rk, K. 
Emma", title="Evaluation of a Digital Previsit Tool for Identifying Stroke-Related Health Problems Before a Follow-Up Visit (Part 1): Survey Study", journal="JMIR Hum Factors", year="2024", month="Sep", day="3", volume="11", pages="e55852", keywords="e-health", keywords="stroke", keywords="Strokeh{\"a}lsa", keywords="follow-up", keywords="previsit", keywords="person-centred care", keywords="health literacy", keywords="digital tool", keywords="shared decision-making", keywords="survey", keywords="mobile phone", abstract="Background: Stroke may lead to various disabilities, and a structured follow-up visit is strongly recommended within a few months after an event. To facilitate this visit, the digital previsit tool ``Strokehealth'' was developed for patients to fill out in advance. The concept Strokeh{\"a}lsa (or Strokehealth) was initially developed in-house as a Windows application, later incorporated in 1177.se. Objective: The study's primary objective was to use a patient satisfaction survey to evaluate the digital previsit tool Strokehealth when used before a follow-up visit, with a focus on feasibility and relevance from the perspective of people with stroke. Our secondary objective was to explore the extent to which the previsit tool identified stroke-related health problems. Methods: Between November 2020 and June 2021, a web-based survey was sent to patients who were scheduled for a follow-up visit after discharge from a stroke unit and had recently filled in the previsit tool. The survey covered demographic characteristics, internet habits, and satisfaction rated using 5 response options. Descriptive statistics were used to present data from both the previsit tool and the survey. We also compared the characteristics of those who completed the previsit tool and those who did not, using nonparametric statistics. Free-text responses were thematically analyzed. 
Results: All patients filling out the previsit tool (80/171; age: median 67, range 32-91 years) were community-dwelling. Most had experienced a mild stroke and reported a median of 2 stroke-related health problems (range 0-8), and they were significantly younger than nonresponders (P<.001). The survey evaluating the previsit tool was completed by 73\% (58/80; 39 men). The majority (48/58, 83\%) reported using the internet daily. Most respondents (56/58, 97\%) were either satisfied (n=15) or very satisfied (n=41) with how well the previsit tool captured their health problems. The highest level of dissatisfaction was related to the response options in Strokehealth (n=5). Based on the free-text answers to the survey, we developed 4 themes. First, Strokehealth was perceived to provide a structure that ensured that issues would be emphasized and considered. Second, user-friendliness and accessibility were viewed as acceptable, although respondents suggested improvements. Third, participants raised awareness about being approached digitally for communication and highlighted the importance of how to be approached. Fourth, their experiences with Strokehealth were influenced by their perceptions of the explanatory texts, the response options, and the possibility of elaborating on their answers in free text. Conclusions: People with stroke considered the freely available previsit tool Strokehealth feasible for preparing in advance for a follow-up visit. Despite high satisfaction with how well the tool captured their health problems, participants indicated that additional free-text responses and revised information could enhance usability. Improvements need to be considered in parallel with qualitative data to ensure that the tool meets patient needs. 
Trial Registration: Researchweb 275135; https://www.researchweb.org/is/vgr/project/275135 ", doi="10.2196/55852", url="https://humanfactors.jmir.org/2024/1/e55852" } @Article{info:doi/10.2196/57658, author="Wunderlich, Markus Maximilian and Krampe, Henning and Fuest, Kristina and Leicht, Dominik and Probst, Benedikt Moriz and Runge, Julian and Schmid, Sebastian and Spies, Claudia and Wei{\ss}, Bj{\"o}rn and Balzer, Felix and Poncette, Akira-Sebastian and ", title="Evaluating the Construct Validity of the Charit{\'e} Alarm Fatigue Questionnaire using Confirmatory Factor Analysis", journal="JMIR Hum Factors", year="2024", month="Aug", day="8", volume="11", pages="e57658", keywords="patient monitoring", keywords="intensive care unit", keywords="alarm", keywords="alarms", keywords="validity", keywords="validation", keywords="safety", keywords="intensive", keywords="care", keywords="alarm fatigue", keywords="alarm management", keywords="patient safety", keywords="ICU", keywords="alarm system", keywords="alarm system quality", keywords="medical devices", keywords="clinical alarms", keywords="questionnaire", keywords="questionnaires", keywords="warning", keywords="factor analysis", abstract="Background: The Charit{\'e} Alarm Fatigue Questionnaire (CAFQa) is a 9-item questionnaire that aims to standardize how alarm fatigue in nurses and physicians is measured. We previously hypothesized that it has 2 correlated scales, one on the psychosomatic effects of alarm fatigue and the other on staff's coping strategies in working with alarms. Objective: We aimed to validate the hypothesized structure of the CAFQa and thus underpin the instrument's construct validity. Methods: We conducted 2 independent studies with nurses and physicians from intensive care units in Germany (study 1: n=265; study 2: n=1212). Responses to the questionnaire were analyzed using confirmatory factor analysis with the unweighted least-squares algorithm based on polychoric covariances. 
Convergent validity was assessed by participants' estimation of their own alarm fatigue and exposure to false alarms as a percentage. Results: In both studies, the $\chi$2 test reached statistical significance (study 1: $\chi^2_{26}$=44.9; P=.01; study 2: $\chi^2_{26}$=92.4; P<.001). Other fit indices suggested a good model fit (in both studies: root mean square error of approximation <0.05, standardized root mean squared residual <0.08, relative noncentrality index >0.95, Tucker-Lewis index >0.95, and comparative fit index >0.995). Participants' mean scores correlated moderately with self-reported alarm fatigue (study 1: r=0.45; study 2: r=0.53) and weakly with self-perceived exposure to false alarms (study 1: r=0.3; study 2: r=0.33). Conclusions: The questionnaire measures the construct of alarm fatigue as proposed in our previous study. Researchers and clinicians can rely on the CAFQa to measure the alarm fatigue of nurses and physicians. Trial Registration: ClinicalTrials.gov NCT04994600; https://www.clinicaltrials.gov/study/NCT04994600 ", doi="10.2196/57658", url="https://humanfactors.jmir.org/2024/1/e57658" } @Article{info:doi/10.2196/57804, author="Bisby, A. Madelyne and Jones, P. 
Michael and Staples, Lauren and Dear, Blake and Titov, Nickolai", title="Measurement of Daily Actions Associated With Mental Health Using the Things You Do Questionnaire--15-Item: Questionnaire Development and Validation Study", journal="JMIR Form Res", year="2024", month="Jul", day="22", volume="8", pages="e57804", keywords="daily actions", keywords="depression", keywords="anxiety", keywords="psychometric", keywords="mental health", keywords="questionnaire", keywords="activities", keywords="goals", keywords="plans", keywords="healthy habits", keywords="habits", keywords="treatment-seeking", keywords="treatment", keywords="confirmatory factor analysis", keywords="survey", keywords="adult", keywords="assessment", keywords="digital psychology service", keywords="digital", keywords="psychology", keywords="depression symptoms", keywords="anxiety symptoms", abstract="Background: A large number of modifiable and measurable daily actions are thought to impact mental health. The ``Things You Do'' refers to 5 types of daily actions that have been associated with mental health: healthy thinking, meaningful activities, goals and plans, healthy habits, and social connections. Previous studies have reported the psychometric properties of the Things You Do Questionnaire (TYDQ)--21-item (TYDQ21). The 21-item version, however, has an uneven distribution of items across the 5 aforementioned factors and may be lengthy to administer on a regular basis. Objective: This study aimed to develop and evaluate a brief version of the TYDQ. To accomplish this, we identified the top 10 and 15 items on the TYDQ21 and then evaluated the performance of the 10-item and 15-item versions of the TYDQ in community and treatment-seeking samples. Methods: Using confirmatory factor analysis, the top 2 or 3 items were used to develop the 10-item and 15-item versions, respectively. 
Model fit, reliability, and validity were examined for both versions in 2 samples: a survey of community adults (n=6070) and adults who completed an assessment at a digital psychology service (n=14,878). Treatment responsivity was examined in a subgroup of participants (n=448). Results: Parallel analysis supported the 5-factor structure of the TYDQ. The brief (10-item and 15-item) versions were associated with better model fit than the 21-item version, as revealed by its comparative fit index, root-mean-square error of approximation, and Tucker-Lewis index. Configural, metric, and scalar invariance were supported. The 15-item version explained more variance in the 21-item scores than the 10-item version. Internal consistency was appropriate (eg, the 15-item version had a Cronbach $\alpha$ of >0.90 in both samples) and there were no marked differences between how the brief versions correlated with validated measures of depression or anxiety symptoms. The measure was responsive to treatment. Conclusions: The 15-item version is appropriate for use as a brief measure of daily actions associated with mental health while balancing brevity and clinical utility. Further research is encouraged to replicate our psychometric evaluation in other settings (eg, face-to-face services). Trial Registration: Australian New Zealand Clinical Trials Registry ACTRN12613000407796; https://tinyurl.com/2s67a6ps ", doi="10.2196/57804", url="https://formative.jmir.org/2024/1/e57804" } @Article{info:doi/10.2196/55443, author="Weichelt, P. 
Bryan and Burke, Rick and Kieke, Burney and Pilz, Matt and Shimpi, Neel", title="Provider Adoption of mHealth in Rural Patient Care: Web-Based Survey Study", journal="JMIR Hum Factors", year="2024", month="Jun", day="24", volume="11", pages="e55443", keywords="mHealth", keywords="clinician", keywords="physician", keywords="rural", keywords="patient", keywords="mobile", keywords="health care", keywords="adoption", keywords="attitude", keywords="attitudes", keywords="opinion", keywords="perception", keywords="perceptions", keywords="perspective", keywords="perspectives", keywords="acceptance", keywords="mobile health", keywords="app", keywords="apps", keywords="provider", keywords="providers", keywords="physicians", keywords="survey", keywords="surveys", keywords="barrier", keywords="barriers", keywords="digital health", abstract="Background: Physicians and patient-facing caregivers have increasingly used mobile health (mHealth) technologies in the past several years, accelerating during the COVID-19 pandemic. However, barriers and feedback surrounding adoption remain relatively understudied and varied across health systems, particularly in rural areas. Objective: This study aims to identify provider adoption, attitudes, and barriers toward mHealth in a large, multisite, rural US health care system. We investigated (1) mHealth apps that providers use for their own benefit and (2) mHealth apps that a provider uses in conjunction with a patient. Methods: We surveyed all patient-seeing providers within the Marshfield Clinic Health System with a brief, 16-item, web-based survey assessing attitudes toward mHealth, adoption of these technologies, and perceived barriers faced by providers, their peers, and the institution. Survey results were summarized via descriptive statistics, with log-binomial regression and accompanying pairwise analyses, using Kruskal-Wallis and Jonckheere-Terpstra tests for significance, respectively. 
Respondents were grouped by reported clinical role and specialty. Results: We received a 38\% (n/N=916/2410) response rate, with 60.7\% (n=556) of those sufficiently complete for analyses. Roughly 54.1\% (n=301) of respondents reported mHealth use, primarily around decision-making and supplemental information, with use differing based on provider role and years of experience. Self-reported barriers to using mHealth included a lack of knowledge and time to study mHealth technologies. Providers also reported concerns about patients' internet access and the complexity of mHealth apps to adequately use mHealth technologies. Providers believed the health system's barriers were largely privacy, confidentiality, and legal review concerns. Conclusions: These findings echo similar studies in other health systems, surrounding providers' lack of time and concerns over privacy and confidentiality of patient data. Providers emphasized concerns over the complexity of these technologies for their patients and concerns over patients' internet access to fully use mHealth in their delivery of care. ", doi="10.2196/55443", url="https://humanfactors.jmir.org/2024/1/e55443" } @Article{info:doi/10.2196/55597, author="Vagnetti, Roberto and Camp, Nicola and Story, Matthew and Ait-Belaid, Khaoula and Mitra, Suvobrata and Zecca, Massimiliano and Di Nuovo, Alessandro and Magistro, Daniele", title="Instruments for Measuring Psychological Dimensions in Human-Robot Interaction: Systematic Review of Psychometric Properties", journal="J Med Internet Res", year="2024", month="Jun", day="5", volume="26", pages="e55597", keywords="psychometric", keywords="human-robot interaction", keywords="psychological dimensions", keywords="robot", keywords="assessment", keywords="systematic review", abstract="Background: Numerous user-related psychological dimensions can significantly influence the dynamics between humans and robots. 
For developers and researchers, it is crucial to have a comprehensive understanding of the psychometric properties of the available instruments used to assess these dimensions as they indicate the reliability and validity of the assessment. Objective: This study aims to provide a systematic review of the instruments available for assessing the psychological aspects of the relationship between people and social and domestic robots, offering a summary of their psychometric properties and the quality of the evidence. Methods: A systematic review was conducted following the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) guidelines across different databases: Scopus, PubMed, and IEEE Xplore. The search strategy encompassed studies meeting the following inclusion criteria: (1) the instrument could assess psychological dimensions related to social and domestic robots, including attitudes, beliefs, opinions, feelings, and perceptions; (2) the study focused on validating the instrument; (3) the study evaluated the psychometric properties of the instrument; (4) the study underwent peer review; and (5) the study was in English. Studies focusing on industrial robots, rescue robots, or robotic arms or those primarily concerned with technology validation or measuring anthropomorphism were excluded. Independent reviewers extracted instrument properties and the methodological quality of their evidence following the Consensus-Based Standards for the Selection of Health Measurement Instruments guidelines. Results: From 3828 identified records, the search strategy yielded 34 (0.89\%) articles that validated and examined the psychometric properties of 27 instruments designed to assess individuals' psychological dimensions in relation to social and domestic robots. These instruments encompass a broad spectrum of psychological dimensions. 
While most studies predominantly focused on structural validity (24/27, 89\%) and internal consistency (26/27, 96\%), consideration of other psychometric properties was frequently inconsistent or absent. No instrument evaluated measurement error and responsiveness despite their significance in the clinical context. Most of the instruments (17/27, 63\%) were targeted at both adults and older adults (aged $\geq$18 years). There was a limited number of instruments specifically designed for children, older adults, and health care contexts. Conclusions: Given the strong interest in assessing psychological dimensions in the human-robot relationship, there is a need to develop new instruments using more rigorous methodologies and consider a broader range of psychometric properties. This is essential to ensure the creation of reliable and valid measures for assessing people's psychological dimensions regarding social and domestic robots. Among its limitations, this review included instruments applicable to both social and domestic robots while excluding those for other specific types of robots (eg, industrial robots). 
", doi="10.2196/55597", url="https://www.jmir.org/2024/1/e55597", url="http://www.ncbi.nlm.nih.gov/pubmed/38682783" } @Article{info:doi/10.2196/55169, author="Yin, Zhijun and Stratton, Lauren and Song, Qingyuan and Ni, Congning and Song, Lijun and Commiskey, Patricia and Chen, Qingxia and Moreno, Monica and Fazio, Sam and Malin, Bradley", title="Perceptions and Utilization of Online Peer Support Among Informal Dementia Caregivers: Survey Study", journal="JMIR Aging", year="2024", month="May", day="31", volume="7", pages="e55169", keywords="informal dementia caregiver", keywords="online health community", keywords="social support", keywords="survey", keywords="online peer support", keywords="caregiving challenges", abstract="Background: Informal dementia caregivers are those who care for a person living with dementia and do not receive payment (eg, family members, friends, or other unpaid caregivers). These informal caregivers are subject to substantial mental, physical, and financial burdens. Online communities enable these caregivers to exchange caregiving strategies and communicate experiences with other caregivers whom they generally do not know in real life. Research has demonstrated the benefits of peer support in online communities, but this research is limited, focusing merely on caregivers who are already online community users. Objective: We aimed to investigate the perceptions and utilization of online peer support through a survey. Methods: Following the Andersen and Newman Framework of Health Services Utilization and using REDCap (Research Electronic Data Capture), we designed and administered a survey to investigate the perceptions and utilization of online peer support among informal dementia caregivers. 
Specifically, we collected types of information that influence whether an informal dementia caregiver accesses online peer support: predisposing factors, which refer to the sociocultural characteristics of caregivers, relationships between caregivers and people living with dementia, and belief in the value of online peer support; enabling factors, which refer to the logistic aspects of accessing online peer support (eg, eHealth literacy and access to high-speed internet); and need factors, which are the most immediate causes of seeking online peer support. We also collected data on caregivers' experiences with accessing online communities. We distributed the survey link on November 14, 2022, within two online locations: the Alzheimer's Association website (as an advertisement) and ALZConnected (an online community organized by the Alzheimer's Association). We collected all responses on February 23, 2023, and conducted a regression analysis to identify factors that were associated with accessing online peer support. Results: We collected responses from 172 dementia caregivers. Of these participants, 140 (81.4\%) completed the entire survey. These caregivers were aged 19 to 87 (mean 54, SD 13.5) years, and a majority were female (123/140, 87.9\%) and White (126/140, 90\%). Our findings show that the behavior of accessing any online community was significantly associated with participants' belief in the value of online peer support (P=.006). Moreover, of the 40 non--online community caregivers, 33 (83\%) had a belief score above 24---the score that was assigned when a neutral option was selected for each belief question. The most common reasons for not accessing any online community were having no time to do so (14/140, 10\%) and having insufficient online information--searching skills (9/140, 6.4\%). 
Conclusions: Our findings suggest that online peer support is valuable, but practical strategies are needed to assist informal dementia caregivers who have limited time or online information--searching skills. ", doi="10.2196/55169", url="https://aging.jmir.org/2024/1/e55169" } @Article{info:doi/10.2196/49227, author="Hern{\'a}ndez Encuentra, Eul{\`a}lia and Robles, Noem{\'i} and Angulo-Brunet, Ariadna and Cullen, David and del Arco, Ignacio", title="Spanish and Catalan Versions of the eHealth Literacy Questionnaire: Translation, Cross-Cultural Adaptation, and Validation Study", journal="J Med Internet Res", year="2024", month="May", day="10", volume="26", pages="e49227", keywords="eHealth literacy", keywords="eHealth", keywords="digital health", keywords="health literacy", keywords="questionnaire", keywords="eHealth Literacy Questionnaire", keywords="eHLQ", keywords="validation", abstract="Background: The rise of digital health services, especially following the outbreak of COVID-19, has led to a need for health literacy policies that respond to people's needs. Spain is a country with a highly developed digital health infrastructure, but there are currently no tools available to measure digital health literacy fully. A well-thought-through questionnaire with strong psychometric properties such as the eHealth Literacy Questionnaire (eHLQ) is important to assess people's eHealth literacy levels, especially in the context of a fast-growing field such as digital health. Objective: This study aims to adapt the eHLQ and gather evidence of its psychometric quality in 2 of Spain's official languages: Spanish and Catalan. Methods: A systematic cultural adaptation process was followed. Data from Spanish-speaking (n=400) and Catalan-speaking (n=400) people were collected. Confirmatory factor analysis was used to confirm the previously established factor structure. For reliability, the Cronbach $\alpha$ and categorical $\omega$ were obtained for every subscale. 
Evidence of convergent and discriminant validity was provided through the correlation with the total score of the eHealth Literacy Scale. Evidence based on relations to other variables was evaluated by examining extreme values for educational level, socioeconomic level, and use of technology variables. Results: Regarding the confirmatory factor analysis, the 7-factor correlated model and the 7 one-factor models had adequate goodness-of-fit indexes for both Spanish and Catalan. Moreover, measurement invariance was established between the Spanish and Catalan versions. Reliability estimates were considered adequate as all the scales in both versions had values of >0.80. For convergent and discriminant validity evidence, the eHealth Literacy Scale showed moderate correlation with eHLQ scales in both versions (Spanish: range 0.57-0.76 and P<.001; Catalan: range 0.41-0.64 and P<.001). According to the relationship with external variables, all the eHLQ scales in both languages could discriminate between the maximum and minimum categories in level of education, socioeconomic level, and level of technology use. Conclusions: The Spanish and Catalan versions of the eHLQ appear to be psychometrically sound questionnaires for assessing digital health literacy. They could both be useful tools in Spain and Catalonia for researchers, policy makers, and health service managers to explore people's needs, skills, and competencies and provide interesting insights into their interactions and engagement regarding their own experiences with digital health services, especially in the context of digital health growth in Spain. 
", doi="10.2196/49227", url="https://www.jmir.org/2024/1/e49227", url="http://www.ncbi.nlm.nih.gov/pubmed/38728072" } @Article{info:doi/10.2196/57963, author="Vujkovic, Branko and Brkovic, Voin and Paji{\v c}i{\'c}, Ana and Pavlovic, Vedrana and Stanisavljevic, Dejana and Krajnovi{\'c}, Du{\vs}anka and Jovic Vranes, Aleksandra", title="Serbian Version of the eHealth Literacy Questionnaire (eHLQ): Translation, Cultural Adaptation, and Validation Study Among Primary Health Care Users", journal="J Med Internet Res", year="2024", month="May", day="9", volume="26", pages="e57963", keywords="eHealth", keywords="digital health", keywords="eHLQ", keywords="eHealth Literacy Questionnaire", keywords="digital health literacy", keywords="primary healthcare", keywords="Serbia", keywords="questionnaire", keywords="technology", keywords="communication", abstract="Background: As digital health services are increasingly developing and becoming more interactive in Serbia, a comprehensive instrument for measuring eHealth literacy (EHL) is needed. Objective: This study aimed to translate, culturally adapt, and investigate the psychometric properties of the Serbian version of the eHealth Literacy Questionnaire (eHLQ); to evaluate EHL in the population of primary health care (PHC) users in Serbia; and to explore factors associated with their EHL. Methods: The validation study was conducted in 8 PHC centers in the territory of the Ma{\v c}va district in Western Serbia. A stratified sampling method was used to obtain a representative sample. The Translation Integrity Procedure was followed to adapt the questionnaire to the Serbian language. The psychometric properties of the Serbian version of the eHLQ were analyzed through the examination of factorial structure, internal consistency, and test-retest reliability. Descriptive statistics were calculated to determine participant characteristics. Differences between groups were tested by the 2-tailed Students t test and ANOVA. 
Univariable and multivariable linear regression analyses were used to determine factors related to EHL. Results: A total of 475 PHC users were enrolled. The mean age was 51.0 (SD 17.3; range 19-94) years, and most participants were female (328/475, 69.1\%). Confirmatory factor analysis validated the 7-factor structure of the questionnaire. Values for incremental fit index (0.96) and comparative fit index (0.95) were above the cutoff of $\geq$0.95. The root mean square error of approximation value of 0.05 was below the suggested value of $\leq$0.06. Cronbach $\alpha$ of the entire scale was 0.95, indicating excellent scale reliability, with Cronbach $\alpha$ ranging from 0.81 to 0.90 for domains. The intraclass correlation coefficient ranged from 0.63 to 0.82, indicating moderate to good test-retest reliability. The highest EHL mean scores were obtained for the understanding of health concepts and language (mean 2.86, SD 0.32) and feel safe and in control (mean 2.89, SD 0.33) domains. Statistically significant differences (all P<.05) for all 7 eHLQ scores were observed for age, education, perceived material status, perceived health status, searching for health information on the internet, and occupation (except domain 4). In multivariable regression models, searching for health information on the internet and being aged younger than 65 years were associated with higher values of all domain scores except the domain feel safe and in control for variable age. Conclusions: This study demonstrates that the Serbian version of the eHLQ can be a useful tool in the measurement of EHL and in the planning of digital health interventions at the population and individual level due to its strong psychometric properties in the Serbian context. ", doi="10.2196/57963", url="https://www.jmir.org/2024/1/e57963", url="http://www.ncbi.nlm.nih.gov/pubmed/38722675" } @Article{info:doi/10.2196/53596, author="Browne, Sara and Umlauf, Anya and Moore, J. David and Benson, A. 
Constance and Vaida, Florin", title="User Experience of Persons Using Ingestible Sensor--Enabled Pre-Exposure Prophylaxis to Prevent HIV Infection: Cross-Sectional Survey Study", journal="JMIR Mhealth Uhealth", year="2024", month="May", day="3", volume="12", pages="e53596", keywords="ingestible sensor", keywords="sensor", keywords="sensors", keywords="oral", keywords="UX", keywords="user experience", keywords="HIV prevention", keywords="medication adherence", keywords="HIV", keywords="prevention", keywords="prophylaxis", keywords="STI", keywords="STD", keywords="sexually transmitted", keywords="sexual transmission", keywords="drug", keywords="drugs", keywords="pharmacy", keywords="pharmacies", keywords="pharmacology", keywords="pharmacotherapy", keywords="pharmaceutic", keywords="pharmaceutics", keywords="pharmaceuticals", keywords="pharmaceutical", keywords="medication", keywords="medications", keywords="adherence", keywords="compliance", keywords="sexually transmitted infection", keywords="sexually transmitted disease", abstract="Background: A digital health technology's success or failure depends on how it is received by users. Objectives: We conducted a user experience (UX) evaluation among persons who used the Food and Drug Administration--approved Digital Health Feedback System incorporating ingestible sensors (ISs) to capture medication adherence, after they were prescribed oral pre-exposure prophylaxis (PrEP) to prevent HIV infection. We performed an association analysis with baseline participant characteristics, to see if ``personas'' associated with positive or negative UX emerged. Methods: UX data were collected upon exit from a prospective intervention study of adults who were HIV negative, prescribed oral PrEP, and used the Digital Health Feedback System with IS-enabled tenofovir disoproxil fumarate plus emtricitabine (IS-Truvada). 
Baseline demographics; urine toxicology; and self-report questionnaires evaluating sleep (Pittsburgh Sleep Quality Index), self-efficacy, habitual self-control, HIV risk perception (Perceived Risk of HIV Scale 8-item), and depressive symptoms (Patient Health Questionnaire--8) were collected. Participants with $\geq$28 days in the study completed a Likert-scale UX questionnaire of 27 questions grouped into 4 domain categories: overall experience, ease of use, intention of future use, and perceived utility. Means and IQRs were computed for participant total and domain subscores, and linear regressions modeled baseline participant characteristics associated with UX responses. Demographic characteristics of responders versus nonresponders were compared using the Fisher exact and Wilcoxon rank-sum tests. Results: Overall, 71 participants were enrolled (age: mean 37.6, range 18-69 years; n=64, 90\% male; n=55, 77\% White; n=24, 34\% Hispanic; n=68, 96\% housed; and n=53, 75\% employed). No demographic differences were observed in the 63 participants who used the intervention for $\geq$28 days. Participants who completed the questionnaire were more likely to be housed (52/53, 98\% vs 8/10, 80\%; P=.06) and less likely to have a positive urine toxicology (18/51, 35\% vs 7/10, 70\%; P=.08), particularly methamphetamine (4/51, 8\% vs 4/10, 40\%; P=.02), than noncompleters. Based on IQR values, $\geq$75\% of participants had a favorable UX based on the total score (median 3.78, IQR 3.17-4.20), overall experience (median 4.00, IQR 3.50-4.50), ease of use (median 3.72, IQR 3.33-4.22), and perceived utility (median 3.72, IQR 3.22-4.25), and $\geq$50\% had favorable intention of future use (median 3.80, IQR 2.80-4.40). Following multipredictor modeling, self-efficacy was significantly associated with the total score (0.822, 95\% CI 0.405-1.240; P<.001) and all subscores (all P<.05). Persons with more depressive symptoms reported better perceived utility (P=.01). 
Poor sleep was associated with a worse overall experience (--0.07, 95\% CI --0.133 to --0.006; P=.03). Conclusions: The UX among persons using IS-enabled PrEP (IS-Truvada) to prevent HIV infection was positive. Association analysis of baseline participant characteristics linked higher self-efficacy with positive UX, more depressive symptoms with higher perceived utility, and poor sleep with negative UX. Trial Registration: ClinicalTrials.gov NCT03693040; https://clinicaltrials.gov/study/NCT03693040 ", doi="10.2196/53596", url="https://mhealth.jmir.org/2024/1/e53596" } @Article{info:doi/10.2196/54581, author="Thunstr{\"o}m, Osmanovic Almira and Carlsen, Krage Hanne and Ali, Lilas and Larson, Tomas and Hellstr{\"o}m, Andreas and Steingrimsson, Steinn", title="Usability Comparison Among Healthy Participants of an Anthropomorphic Digital Human and a Text-Based Chatbot as a Responder to Questions on Mental Health: Randomized Controlled Trial", journal="JMIR Hum Factors", year="2024", month="Apr", day="29", volume="11", pages="e54581", keywords="chatbot", keywords="chatbots", keywords="chat-bot", keywords="chat-bots", keywords="text-only chatbot, voice-only chatbot", keywords="mental health", keywords="mental illness", keywords="mental disease", keywords="mental diseases", keywords="mental illnesses", keywords="mental health service", keywords="mental health services", keywords="interface", keywords="system usability", keywords="usability", keywords="digital health", keywords="machine learning", keywords="ML", keywords="artificial intelligence", keywords="AI", keywords="algorithm", keywords="algorithms", keywords="NLP", keywords="natural language processing", abstract="Background: The use of chatbots in mental health support has increased exponentially in recent years, with studies showing that they may be effective in treating mental health problems. More recently, the use of visual avatars called digital humans has been introduced. 
Digital humans have the capability to use facial expressions as another dimension in human-computer interactions. It is important to study the difference in emotional response and usability preferences between text-based chatbots and digital humans for interacting with mental health services. Objective: This study aims to explore to what extent a digital human interface and a text-only chatbot interface differed in usability when tested by healthy participants, using BETSY (Behavior, Emotion, Therapy System, and You) which uses 2 distinct interfaces: a digital human with anthropomorphic features and a text-only user interface. We also set out to explore how chatbot-generated conversations on mental health (specific to each interface) affected self-reported feelings and biometrics. Methods: We explored to what extent a digital human with anthropomorphic features differed from a traditional text-only chatbot regarding perception of usability through the System Usability Scale, emotional reactions through electroencephalography, and feelings of closeness. Healthy participants (n=45) were randomized to 2 groups that used a digital human with anthropomorphic features (n=25) or a text-only chatbot with no such features (n=20). The groups were compared by linear regression analysis and t tests. Results: No differences were observed between the text-only and digital human groups regarding demographic features. The mean System Usability Scale score was 75.34 (SD 10.01; range 57-90) for the text-only chatbot versus 64.80 (SD 14.14; range 40-90) for the digital human interface. Both groups scored their respective chatbot interfaces as average or above average in usability. Women were more likely to report feeling annoyed by BETSY. Conclusions: The text-only chatbot was perceived as significantly more user-friendly than the digital human, although there were no significant differences in electroencephalography measurements. 
Male participants exhibited lower levels of annoyance with both interfaces, contrary to previously reported findings. ", doi="10.2196/54581", url="https://humanfactors.jmir.org/2024/1/e54581", url="http://www.ncbi.nlm.nih.gov/pubmed/38683664" } @Article{info:doi/10.2196/50889, author="Hellstrand Tang, Ulla and Smith, Frida and Karilampi, Leyla Ulla and Gremyr, Andreas", title="Exploring the Role of Complexity in Health Care Technology Bottom-Up Innovations: Multiple-Case Study Using the Nonadoption, Abandonment, Scale-Up, Spread, and Sustainability Complexity Assessment Tool", journal="JMIR Hum Factors", year="2024", month="Apr", day="26", volume="11", pages="e50889", keywords="digital", keywords="bottom-up innovation", keywords="complexity", keywords="eHealth", keywords="health care", keywords="nonadoption, abandonment, scale-up, spread, and sustainability complexity assessment tool", keywords="NASSS-CAT", keywords="mobile phone", abstract="Background: New digital technology presents new challenges to health care on multiple levels. There are calls for further research that considers the complex factors related to digital innovations in complex health care settings to bridge the gap when moving from linear, logistic research to embracing and testing the concept of complexity. The nonadoption, abandonment, scale-up, spread, and sustainability (NASSS) framework was developed to help study complexity in digital innovations. Objective: This study aims to investigate the role of complexity in the development and deployment of innovations by retrospectively assessing challenges to 4 digital health care innovations initiated from the bottom up. Methods: A multicase retrospective, deductive, and explorative analysis using the NASSS complexity assessment tool LONG was conducted. In total, 4 bottom-up innovations developed in Region V{\"a}stra G{\"o}taland in Sweden were explored and compared to identify unique and shared complexity-related challenges. 
Results: The analysis resulted in joint insights and individual learning. Overall, the complexity was mostly found outside the actual innovation; more specifically, it related to the organization's readiness to integrate new innovations, how to manage and maintain innovations, and how to finance them. The NASSS framework sheds light on various perspectives that can either facilitate or hinder the adoption, scale-up, and spread of technological innovations. In the domain of condition or diagnosis, a well-informed understanding of the complexity related to the condition or illness (diabetes, cancer, bipolar disorders, and schizophrenia disorders) is of great importance for the innovation. The value proposition needs to be clearly described early to enable an understanding of costs and outcomes. The questions in the NASSS complexity assessment tool LONG were sometimes difficult to comprehend, not only from a language perspective but also due to a lack of understanding of the surrounding organization's system and its setting. Conclusions: Even when bottom-up innovations arise within the same support organization, the complexity can vary based on the developmental phase and the unique characteristics of each project. Identifying, defining, and understanding complexity may not solve the issues but substantially improves the prospects for successful deployment. Successful innovation within complex organizations necessitates an adaptive leadership and structures to surmount cultural resistance and organizational impediments. A rigid, linear, and stepwise approach risks disregarding interconnected variables and dependencies, leading to suboptimal outcomes. Success lies in embracing the complexity with its uncertainty, nurturing creativity, and adopting a nonlinear methodology that accommodates the iterative nature of innovation processes within complex organizations. 
", doi="10.2196/50889", url="https://humanfactors.jmir.org/2024/1/e50889", url="http://www.ncbi.nlm.nih.gov/pubmed/38669076" } @Article{info:doi/10.2196/45985, author="Lin, Shuhong and Chen, Xinxin and Tan, Linxiang and Liao, Zhenjiang and Li, Yifan and Tang, Ying and Huang, Qiuping and Shen, Hongxian", title="Psychometric Properties of the Metacognitions About Online Gaming Scale in the Chinese Population and Its Relationship With Internet Gaming Disorder: Cross-Sectional Study", journal="JMIR Serious Games", year="2024", month="Apr", day="22", volume="12", pages="e45985", keywords="metacognition", keywords="metacognitions about online gaming", keywords="Internet Gaming Disorder", keywords="psychometric properties", keywords="Chinese", abstract="Background: Metacognitions about online gaming have been shown to be correlated with Internet Gaming Disorder (IGD). Knowledge of metacognitions about online gaming can help to understand IGD. The Metacognitions about Online Gaming Scale (MOGS) is a reliable and valid tool to measure specific metacognitions about online gaming in both adults and adolescents, which is lacking in China. Objective: This study was conducted to assess the psychometric properties of the Chinese version of the MOGS (C-MOGS) and its relationship with IGD in the Chinese population. Methods: A total of 772 Chinese individuals (age: mean 21.70, SD 8.81 years; age range: 13-57 years; 458/772, 59.3\% male) completed a web-based questionnaire survey, including the C-MOGS and a battery of validated scales measuring IGD, gaming motives, depression, and anxiety. Results: Through exploratory and confirmatory factor analyses, the 3-factor structure was confirmed to have adequate model fit and internal consistency reliability (Cronbach $\alpha$$\geq$.799, Guttman split-half coefficients$\geq$0.754). Concurrent validity of the C-MOGS was supported by its correlations with IGD (P<.001), gaming motives (P<.001), depression (P<.001), and anxiety (P<.001). 
Furthermore, the incremental validity analysis showed that the C-MOGS predicted 13\% of the variance in IGD while controlling for gender, age, weekly gaming hours, gaming motives, depression, and anxiety. Conclusions: This study provides evidence that the psychometric properties of the C-MOGS are appropriate and emphasizes its positive association with IGD. The C-MOGS is a reliable and valid instrument for mental health workers to assess metacognitions about online gaming in the Chinese population. ", doi="10.2196/45985", url="https://games.jmir.org/2024/1/e45985", url="http://www.ncbi.nlm.nih.gov/pubmed/38648634" } @Article{info:doi/10.2196/51616, author="Choi, JiYeon and Choi, Seongmi and Song, Kijun and Baek, Jiwon and Kim, Heejung and Choi, Mona and Kim, Yesol and Chu, Hui Sang and Shin, Jiyoung", title="Everyday Digital Literacy Questionnaire for Older Adults: Instrument Development and Validation Study", journal="J Med Internet Res", year="2023", month="Dec", day="14", volume="25", pages="e51616", keywords="aging", keywords="older adults", keywords="digital literacy", keywords="instrument", keywords="validation", keywords="psychometrics", keywords="European Commission's Digital Competence framework", abstract="Background: The need for digital literacy in aging populations is increasing in the digitalizing society. Digital literacy involves the identification, evaluation, and communication of information through various digital devices or relevant programs. Objective: The aims of this study were to develop an Everyday Digital Literacy Questionnaire (EDLQ), a digital literacy assessment scale, and subsequently evaluate its psychometric properties using a population of community-dwelling older adults in South Korea. Methods: The EDLQ was developed using an instrument development design. A nationwide survey was conducted, and the study included 1016 community-dwelling older adults (age $\geq$60 years). 
To evaluate the psychometric properties, the participants were randomly divided into 2 groups (n=508 each), and the internal consistency (Cronbach $\alpha$ and McDonald $\omega$), structural validity (exploratory factor analysis and confirmatory factor analysis), hypothesis-testing construct validity using the eHealth Literacy Scale (eHEALS), and measurement invariance were analyzed. Results: Among the initial 30 items of the EDLQ, 22 items with a 3-factor solution had a total explained variance of 77\%. The domains included ``information and communication'' (9 items), ``content creation and management'' (4 items), and ``safety and security'' (9 items). Confirmatory factor analysis was conducted with this 3-factor solution ($\chi^2_{206}$=345.1; normed $\chi^2_{206}$=1.7; comparative fit index=0.997; Tucker-Lewis index=0.997; root-mean-square error of approximation=0.036; standardized root-mean-square residual=0.050; composite reliability=0.903-0.959; average variance extracted=0.699-0.724; $R^2$=0.616-0.773). Hypothesis-testing construct validity with the eHEALS revealed a strong correlation (r=0.75). Cronbach $\alpha$ and McDonald $\omega$ coefficients were .98 and 0.98, respectively. The fit indices for measurement invariance, including the configural, metric, and scalar invariance models, demonstrated a satisfactory fit to the data. Our findings suggest that the psychometric properties of the 22-item EDLQ are valid and reliable for assessing digital literacy among older Korean adults. Conclusions: In this study, we developed a digital literacy measure with strong psychometric properties that made it suitable for assessing the digital literacy of community-dwelling older adults in Korea. To broaden its applicability, however, further assessment of its feasibility for use with different languages and cultures is necessary. 
Moreover, more empirical research on digital literacy and related factors in older adults can facilitate the development of personalized digital health care services and educational interventions in the digital society. ", doi="10.2196/51616", url="https://www.jmir.org/2023/1/e51616", url="http://www.ncbi.nlm.nih.gov/pubmed/38095999" } @Article{info:doi/10.2196/46379, author="Curran, Janet and Wozney, Lori and Tavender, Emma and Wilson, Catherine and Ritchie, C. Krista and Wong, Helen and Gallant, Allyson and Somerville, Mari and Archambault, M. Patrick and Cassidy, Christine and Jabbour, Mona and Mackay, Rebecca and Plint, C. Amy", title="Implementing Electronic Discharge Communication Tools in Pediatric Emergency Departments: Multicountry, Cross-Sectional Readiness Survey of Nurses and Physicians", journal="JMIR Hum Factors", year="2023", month="Oct", day="11", volume="10", pages="e46379", keywords="discharge communication", keywords="pediatric", keywords="emergency department", keywords="medical informatics", keywords="implementation science", keywords="electronic medical record", keywords="mobile phone", abstract="Background: Pediatric emergency departments (ED) in many countries are implementing electronic tools such as kiosks, mobile apps, and electronic patient portals, to improve the effectiveness of discharge communication. Objective: This study aimed to survey nurse and physician readiness to adopt these tools. Methods: An electronic, cross-sectional survey was distributed to a convenience sample of currently practicing ED nurses and physicians affiliated with national pediatric research organizations in Canada, Australia, and New Zealand. Survey development was informed by the nonadoption, abandonment, scale-up, spread, sustainability framework. Measures of central tendency, and parametric and nonparametric tests were used to describe and compare nurse and physician responses. 
Results: Out of the 270 participants, the majority were physicians (61\%, 164/270), female (65\%, 176/270), and had 5 or more years of ED experience (76\%, 205/270). There were high levels of consensus related to the value proposition of electronic discharge communication tools (EDCTs) with 82\% (221/270) of them agreeing that they help parents and patients with comprehension and recall. Lower levels of consensus were observed for organizational factors with only 37\% (100/270) agreeing that their staff is equipped to handle challenges with communication technologies. Nurses and physicians showed significant differences on 3 out of 21 readiness factors. Compared to physicians, nurses were significantly more likely to report that EDs have a responsibility to integrate EDCTs as part of a modern system (P<.001) and that policies are in place to guide safe and secure electronic communication (P=.02). Physicians were more likely to agree that using an EDCT would change their routine tasks (P=.04). One third (33\%, 89/270) of participants indicated that they use or have used EDCT. Conclusions: Despite low levels of uptake, both nurses and physicians in multiple countries view EDCTs as a valuable support to families visiting pediatric ED. Leadership for technology change, unclear impact on workflow, and disparities in digital literacy skills require focused research effort. 
", doi="10.2196/46379", url="https://humanfactors.jmir.org/2023/1/e46379", url="http://www.ncbi.nlm.nih.gov/pubmed/37819696" } @Article{info:doi/10.2196/42843, author="Monkman, Helen and Griffith, Janessa and MacDonald, Leah and Lesselroth, Blake", title="Consumers' Needs for Laboratory Results Portals: Questionnaire Study", journal="JMIR Hum Factors", year="2023", month="Jun", day="12", volume="10", pages="e42843", keywords="consumer health information", keywords="user-centered design", keywords="clinical laboratory information systems", keywords="laboratory test result", keywords="patient portal", keywords="laboratory result", keywords="facilitator", keywords="barrier", keywords="information system", keywords="questionnaire", keywords="usability", abstract="Background: Over the last decade, there has been an increase in the number of health care consumers (ie, patients, citizens, and laypeople) with access to their laboratory results through portals. However, many portals are not designed with the consumer in mind, which can limit communication effectiveness and consumer empowerment. Objective: We aimed to study design facilitators and barriers affecting consumer use of a laboratory results portal. We sought to identify modifiable design attributes to inform future interface specifications and improve patient safety. Methods: A web-based questionnaire with open- and closed-ended items was distributed to consumers in British Columbia, Canada. Open-ended items with affinity diagramming and closed-ended questions with descriptive statistics were analyzed. Results: Participants (N=30) preferred reviewing their laboratory results through portals rather than waiting to see their provider. However, respondents were critical of the interface design (ie, interface usability, information completeness, and display clarity). Scores suggest there are display issues impacting communication that require urgent attention. 
Conclusions: There are modifiable usability, content, and display issues associated with laboratory results portals that, if addressed, could arguably improve communication effectiveness, patient empowerment, and health care safety. ", doi="10.2196/42843", url="https://humanfactors.jmir.org/2023/1/e42843", url="http://www.ncbi.nlm.nih.gov/pubmed/37307049" } @Article{info:doi/10.2196/44241, author="Ataya, Jawdat and Jamous, Issam and Dashash, Mayssoon", title="Measurement of Humanity Among Health Professionals: Development and Validation of the Medical Humanity Scale Using the Delphi Method", journal="JMIR Form Res", year="2023", month="May", day="2", volume="7", pages="e44241", keywords="medical humanity", keywords="Medical Humanitarian Scale", keywords="scale", keywords="humanity", keywords="humanitarian", keywords="humane", keywords="Hippocratic oath", keywords="Delphi", keywords="development", keywords="patient centered", keywords="compassion", keywords="ethic", keywords="empathy", keywords="empathetic", keywords="validity", keywords="validation", keywords="person centered", keywords="the humanitarian aspect", keywords="students of medical colleges", keywords="Syria", abstract="Background: Despite the importance of humanism in providing health care, there is a lack of valid and reliable tool for assessing humanity among health professionals. Objective: The aim of this study was to design a new humanism scale and to assess the validity of this scale in measuring humanism among Syrian health professional students. Methods: The Medical Humanity Scale (MHS) was designed. It consists of 27 items categorized into 7 human values including patient-oriented care, respect, empathy, ethics, altruism, and compassion. The scale was tested for internal consistency and reliability using Cronbach $\alpha$ and test-retest methods. 
The construct validity of the scale was also tested to assess the ability of the scale in differentiating between groups of health professional students with different levels of medical humanity. A 7-point Likert scale was adopted. The study included 300 participants including 97 medical, 78 dental, 82 pharmacy, and 43 preparatory-year students from Syrian universities. The Delphi method was used and factors analysis was performed. Bartlett test of sphericity and the Kaiser-Meyer-Olkin measure of sample adequacy were used. The number of components was extracted using principal component analysis. Results: The mean score of the MHS was 158.7 (SD 11.4). The MHS mean score of female participants was significantly higher than the mean score of male participants (159.59, SD 10.21 vs 155.48, SD 14.35; P=.008). The MHS mean score was significantly lower in dental students (154.12, SD 1.45; P=.005) than the mean scores of medical students (159.77, SD 1.02), pharmacy students (161.40, SD 1.05), and preparatory-year students (159.05, SD 1.94). However, no significant relationship was found between humanism and academic year (P=.32), university type (P=.34), marital status (P=.64), or financial situation (P=.16). The Kaiser-Meyer-Olkin test (0.730) and Bartlett test of sphericity (1201.611, df=351; P=.01) were performed. Factor analysis indicated that the proportion of variables between the first and second factors was greater than 10\%, confirming that the scale was a single group. The Cronbach $\alpha$ for the overall scale was 0.735, indicating that the scale had acceptable reliability and validity. Conclusions: The results of this study suggest that the MHS is a reliable and valid tool for measuring humanity among health professional students and the development of patient-centered care. 
", doi="10.2196/44241", url="https://formative.jmir.org/2023/1/e44241", url="http://www.ncbi.nlm.nih.gov/pubmed/37129940" } @Article{info:doi/10.2196/43348, author="Dong, Zhaogang and Ji, Meng and Shan, Yi and Xu, Xiaofei and Xing, Zhaoquan", title="Functional Health Literacy Among Chinese Populations and Associated Factors: Latent Class Analysis", journal="JMIR Form Res", year="2023", month="Apr", day="28", volume="7", pages="e43348", keywords="functional health literacy", keywords="associated factors", keywords="Chinese populations", keywords="latent class analysis", abstract="Background: Poor functional health literacy has been found to be independently associated with poor self-assessed health, poor understanding of one's health condition and its management, and higher use of health services. Given the importance of functional health literacy, it is necessary to assess the overall status of functional health literacy in the general public. However, the literature review shows that no studies of functional health literacy have been conducted among the Chinese population in China. Objective: This study aimed to classify Chinese populations into different functional health literacy clusters and ascertain significant factors closely associated with low functional health literacy to provide some implications for health education, medical research, and public health policy making. Methods: We hypothesized that the participants' functional health literacy levels were associated with various demographic characteristics. Therefore, we designed a four-section questionnaire including the following information: (1) age, gender, and education; (2) self-assessed disease knowledge; (3) 3 validated health literacy assessment tools (ie, the All Aspects of Health Literacy Scale, the eHealth Literacy Scale, and the 6-item General Health Numeracy Test); and (4) health beliefs and self-confidence measured by the Multidimensional Health Locus of Control Scales Form B. 
Using randomized sampling, we recruited survey participants from Qilu Hospital affiliated to Shandong University, China. The questionnaire was administered via wenjuanxing. A returned questionnaire was valid only when all question items included were answered, according to our predefined validation criterion. All valid data were coded according to the predefined coding schemes of Likert scales with different point (score) ranges. Finally, we used latent class analysis to classify Chinese populations into clusters of different functional health literacy and identify significant factors closely associated with low functional health literacy. Results: All data in the 800 returned questionnaires proved valid according to the predefined validation criterion. Applying latent class analysis, we classified Chinese populations into low (n=292, 36.5\%), moderate-to-adequate (n=286, 35.7\%), and low-to-moderate (n=222, 27.8\%) functional health literacy groups and identified five factors associated with low communicative health literacy: (1) male gender (aged 40-49 years), (2) lower educational attainment (below diploma), (3) age between 38 and 68 years, (4) lower self-efficacy, and (5) belief that staying healthy was a matter of luck. Conclusions: We classified Chinese populations into 3 functional health literacy groups and identified 5 factors associated with low functional health literacy. These associated factors can provide some implications for health education, medical research, and health policy making. 
", doi="10.2196/43348", url="https://formative.jmir.org/2023/1/e43348", url="http://www.ncbi.nlm.nih.gov/pubmed/37115594" } @Article{info:doi/10.2196/38298, author="Vincent, Wilson", title="Developing and Evaluating a Measure of the Willingness to Use Pandemic-Related mHealth Tools Using National Probability Samples in the United States: Quantitative Psychometric Analyses and Tests of Sociodemographic Group Differences", journal="JMIR Form Res", year="2023", month="Feb", day="7", volume="7", pages="e38298", keywords="COVID-19", keywords="psychometric properties", keywords="mHealth", keywords="digital health", keywords="digital screening", keywords="digital tracking", keywords="pandemic", keywords="national survey", keywords="mobile health", keywords="digital health tool", keywords="vulnerable population", keywords="demographic characteristic", keywords="instrument validation", abstract="Background: There are no psychometrically validated measures of the willingness to engage in public health screening and prevention efforts, particularly mobile health (mHealth)--based tracking, that can be adapted to future crises post--COVID-19. Objective: The psychometric properties of a novel measure of the willingness to participate in pandemic-related screening and tracking, including the willingness to use pandemic-related mHealth tools, were tested. Methods: Data were from a cross-sectional, national probability survey deployed in 3 cross-sectional stages several weeks apart to adult residents of the United States (N=6475; stage 1 n=2190, 33.82\%; stage 2 n=2238, 34.56\%; and stage 3 n=2047, 31.62\%) from the AmeriSpeak probability-based research panel covering approximately 97\% of the US household population. Five items asked about the willingness to use mHealth tools for COVID-19--related screening and tracking and provide biological specimens for COVID-19 testing. 
Results: In the first, exploratory sample, 3 of 5 items loaded onto 1 underlying factor, the willingness to use pandemic-related mHealth tools, based on exploratory factor analysis (EFA). A 2-factor solution, including the 3-item factor, fit the data (root mean square error of approximation [RMSEA]=0.038, comparative fit index [CFI]=1.000, standardized root mean square residual [SRMR]=0.005), and the factor loadings for the 3 items ranged from 0.849 to 0.893. In the second, validation sample, the reliability of the 3-item measure was high (Cronbach $\alpha$=.90), and 1 underlying factor for the 3 items was confirmed using confirmatory factor analysis (CFA): RMSEA=0, CFI=1.000, SRMR=0 (a saturated model); factor loadings ranged from 1.000 to 0.962. The factor was independently associated with COVID-19--preventive behaviors (eg, ``worn a face mask'': r=0.313, SE=0.041, P<.001; ``kept a 6-foot distance from those outside my household'': r=0.282, SE=0.050, P<.001) and the willingness to provide biological specimens for COVID-19 testing (ie, swab to cheek or nose: r=0.709, SE=0.017, P<.001; small blood draw: r=0.684, SE=0.019, P<.001). In the third, multiple-group sample, the measure was invariant, or measured the same thing in the same way (ie, difference in CFI [$\Delta$CFI]<0.010 across all grouping categories), across age groups, gender, racial/ethnic groups, education levels, US geographic region, and population density (ie, rural, suburban, urban). When repeated across different samples, factor-analytic findings were essentially the same. Additionally, there were mean differences ($\Delta$M) in the willingness to use mHealth tools across samples, mainly based on race or ethnicity and population density. 
For example, in SD units, suburban ($\Delta$M=--0.30, SE=0.13, P=.001) and urban ($\Delta$M=--0.42, SE=0.12, P<.001) adults showed less willingness to use mHealth tools than rural adults in the third sample collected on May 30-June 8, 2020, but no differences were detected in the first sample collected on April 20-26, 2020. Conclusions: Findings showed that the screener is psychometrically valid. It can also be adapted to future public health crises. Racial and ethnic minority adults showed a greater willingness to use mHealth tools than White adults. Rural adults showed more mHealth willingness than suburban and urban adults. Findings have implications for public health screening and tracking and understanding digital health inequities, including lack of uptake. ", doi="10.2196/38298", url="https://formative.jmir.org/2023/1/e38298", url="http://www.ncbi.nlm.nih.gov/pubmed/36689545" } @Article{info:doi/10.2196/40733, author="Shan, Yi and Xing, Zhaoquan and Dong, Zhaogang and Ji, Meng and Wang, Ding and Cao, Xiangting", title="Translating and Adapting the DISCERN Instrument Into a Simplified Chinese Version and Validating Its Reliability: Development and Usability Study", journal="J Med Internet Res", year="2023", month="Feb", day="2", volume="25", pages="e40733", keywords="DISCERN", keywords="translation", keywords="adaptation", keywords="validation", keywords="quality", keywords="patient-targeted health information", keywords="treatment choice", abstract="Background: There is a wide variation in the quality of information available to patients on the treatment of the diseases afflicting them. 
To help patients find clear and accessible information, many scales have been designed to evaluate the quality of health information, including the Patient Education Materials Assessment Tool; the Suitability Assessment of Materials for evaluation of health-related information for adults; and DISCERN, an instrument for judging the quality of written consumer health information on treatment choices. These instruments are primarily in English. Few of them have been translated and adapted into simplified Chinese tools for health information assessment in China. Objective: This study aimed to translate and adapt DISCERN into the first simplified Chinese version and validate the psychometric properties of this newly developed scale for judging the quality of patient-oriented health information on treatment choices. Methods: First, we translated DISCERN into simplified Chinese using rigorous guidelines for translation and validation studies. We tested the translation equivalence and measured the content validity index. We then presented the simplified Chinese instrument to 3 health educators and asked them to use it to assess the quality of 15 lung cancer--related materials. We calculated the Cohen $\kappa$ coefficient and Cronbach $\alpha$ for all items and for the entire scale to determine the reliability of the new tool. Results: We decided on the simplified Chinese version of the DISCERN instrument (C-DISCERN) after resolving all problems in translation, adaptation, and content validation. The C-DISCERN was valid and reliable: the content validity index was 0.98 (47/48, 98\% of the items) for clarity and 0.94 (45/48, 94\% of the items) for relevance, the Cronbach $\alpha$ for internal consistency was .93 (95\% CI 0.699-1.428) for the whole translated scale, and the Cohen $\kappa$ coefficient for internal consistency was 0.53 (95\% CI 0.417-0.698). Conclusions: C-DISCERN is the first simplified Chinese version of the DISCERN instrument. 
Its validity and reliability have been attested to assess the quality of patient-targeted information for treatment choices. ", doi="10.2196/40733", url="https://www.jmir.org/2023/1/e40733", url="http://www.ncbi.nlm.nih.gov/pubmed/36729573" } @Article{info:doi/10.2196/38108, author="Dong, Aishu and Huang, Jing and Lin, Shudan and Zhu, Jianing and Zhou, Haitao and Jin, Qianqian and Zhao, Wei and Zhu, Lianlian and Guo, Wenjian", title="Psychometric Properties of the Chinese Warwick-Edinburgh Mental Well-being Scale in Medical Staff: Cross-sectional Study", journal="J Med Internet Res", year="2022", month="Nov", day="30", volume="24", number="11", pages="e38108", keywords="psychometric property", keywords="Chinese Warwick-Edinburgh Mental Well-being Scale", keywords="classical test theory", keywords="well-being", keywords="item response theory", keywords="medical staff", keywords="China", abstract="Background: Worldwide, mental well-being is a critical issue for public health, especially among medical staff; it affects professionalism, efficiency, quality of care delivery, and overall quality of life. Nevertheless, assessing mental well-being is a complex problem. Objective: This study aimed to evaluate the psychometric properties of the Chinese-language version of the 14-item Warwick-Edinburgh Mental Well-being Scale (WEMWBS) in medical staff recruited mainly from 6 hospitals in China and provide a reliable measurement of positive mental well-being. Methods: A cross-sectional online survey was conducted of medical staff from 15 provinces in China from May 15 to July 15, 2020. Confirmatory factor analysis (CFA) was conducted to test the structure of the Chinese WEMWBS. The Spearman correlations of the Chinese WEMWBS with the 5-item World Health Organization Well-Being Index (WHO-5) were used to evaluate convergent validity. The Cronbach $\alpha$ and split-half reliability ($\lambda$) represented internal consistency. 
A graded response model was adopted for an item response theory (IRT) analysis. We report discrimination, difficulty, item characteristic curves (ICCs), and item information curves (IICs). ICCs and IICs were used to estimate reliability and validity based on the IRT analysis. Results: A total of 572 participants from 15 provinces in China finished the Chinese WEMWBS. The CFA showed that the 1D model was satisfactory and internal consistency reliability was excellent, with $\alpha$=.965 and $\lambda$=0.947, while the item-scale correlation coefficients ranged from r=0.727 to r=0.900. The correlation coefficient between the Chinese WEMWBS and the WHO-5 was significant, at r=0.746. The average variance extraction value was 0.656, and the composite reliability value was 0.964, with good aggregation validity. The discrimination of the Chinese WEMWBS items ranged from 2.026 to 5.098. The ICCs illustrated that the orders of the category thresholds for the 14 items were satisfactory. Conclusions: The Chinese WEMWBS showed good psychometric properties and can measure well-being in medical staff. 
", doi="10.2196/38108", url="https://www.jmir.org/2022/11/e38108", url="http://www.ncbi.nlm.nih.gov/pubmed/36449336" } @Article{info:doi/10.2196/32630, author="Ni{\ss}en, Marcia and R{\"u}egger, Dominik and Stieger, Mirjam and Fl{\"u}ckiger, Christoph and Allemand, Mathias and v Wangenheim, Florian and Kowatsch, Tobias", title="The Effects of Health Care Chatbot Personas With Different Social Roles on the Client-Chatbot Bond and Usage Intentions: Development of a Design Codebook and Web-Based Study", journal="J Med Internet Res", year="2022", month="Apr", day="27", volume="24", number="4", pages="e32630", keywords="chatbot", keywords="conversational agent", keywords="social roles", keywords="interpersonal closeness", keywords="social role theory", keywords="working alliance", keywords="design", keywords="persona", keywords="digital health intervention", keywords="web-based experiment", abstract="Background: The working alliance refers to an important relationship quality between health professionals and clients that robustly links to treatment success. Recent research shows that clients can develop an affective bond with chatbots. However, few research studies have investigated whether this perceived relationship is affected by the social roles of differing closeness a chatbot can impersonate and by allowing users to choose the social role of a chatbot. Objective: This study aimed at understanding how the social role of a chatbot can be expressed using a set of interpersonal closeness cues and examining how these social roles affect clients' experiences and the development of an affective bond with the chatbot, depending on clients' characteristics (ie, age and gender) and whether they can freely choose a chatbot's social role. Methods: Informed by the social role theory and the social response theory, we developed a design codebook for chatbots with different social roles along an interpersonal closeness continuum. 
Based on this codebook, we manipulated a fictitious health care chatbot to impersonate one of four distinct social roles common in health care settings---institution, expert, peer, and dialogical self---and examined effects on perceived affective bond and usage intentions in a web-based lab study. The study included a total of 251 participants, whose mean age was 41.15 (SD 13.87) years; 57.0\% (143/251) of the participants were female. Participants were either randomly assigned to one of the chatbot conditions (no choice: n=202, 80.5\%) or could freely choose to interact with one of these chatbot personas (free choice: n=49, 19.5\%). Separate multivariate analyses of variance were performed to analyze differences (1) between the chatbot personas within the no-choice group and (2) between the no-choice and the free-choice groups. Results: While the main effect of the chatbot persona on affective bond and usage intentions was insignificant (P=.87), we found differences based on participants' demographic profiles: main effects for gender (P=.04, $\eta$p2=0.115) and age (P<.001, $\eta$p2=0.192) and a significant interaction effect of persona and age (P=.01, $\eta$p2=0.102). Participants younger than 40 years reported higher scores for affective bond and usage intentions for the interpersonally more distant expert and institution chatbots; participants 40 years or older reported higher outcomes for the closer peer and dialogical-self chatbots. The option to freely choose a persona significantly benefited perceptions of the peer chatbot further (eg, free-choice group affective bond: mean 5.28, SD 0.89; no-choice group affective bond: mean 4.54, SD 1.10; P=.003, $\eta$p2=0.117). Conclusions: Manipulating a chatbot's social role is a possible avenue for health care chatbot designers to tailor clients' chatbot experiences using user-specific demographic factors and to improve clients' perceptions and behavioral intentions toward the chatbot. 
Our results also emphasize the benefits of letting clients freely choose between chatbots. ", doi="10.2196/32630", url="https://www.jmir.org/2022/4/e32630", url="http://www.ncbi.nlm.nih.gov/pubmed/35475761" } @Article{info:doi/10.2196/34606, author="Gooch, Daniel and Mehta, Vikram and Stuart, Avelie and Katz, Dmitri and Bennasar, Mohamed and Levine, Mark and Bandara, Arosha and Nuseibeh, Bashar and Bennaceur, Amel and Price, Blaine", title="Designing Tangibles to Support Emotion Logging for Older Adults: Development and Usability Study", journal="JMIR Hum Factors", year="2022", month="Apr", day="27", volume="9", number="2", pages="e34606", keywords="older adults", keywords="health", keywords="emotion", keywords="affect", keywords="well-being", keywords="tangible interaction", keywords="TUI", abstract="Background: The global population is aging, leading to shifts in health care needs. In addition to developing technology to support physical health, there is an increasing recognition of the need to consider how technology can support emotional health. This raises the question of how to design devices that older adults can interact with to log their emotions. Objective: We designed and developed 2 novel tangible devices, inspired by existing paper-based scales of emotions. The findings from a field trial of these devices with older adults are reported. Methods: Using interviews, field deployment, and fixed logging tasks, we assessed the developed devices. Results: Our results demonstrate that the tangible devices provided data comparable with standardized psychological scales of emotion. The participants developed their own patterns of use around the devices, and their experience of using the devices uncovered a variety of design considerations. We discuss the difficulty of customizing devices for specific user needs while logging data comparable to psychological scales of emotion. We also highlight the value of reflecting on sparse emotional data. 
Conclusions: Our work demonstrates the potential for tangible emotional logging devices. It also supports further research on whether such devices can support the emotional health of older adults by encouraging reflection of their emotional state. ", doi="10.2196/34606", url="https://humanfactors.jmir.org/2022/2/e34606", url="http://www.ncbi.nlm.nih.gov/pubmed/35475781" } @Article{info:doi/10.2196/31459, author="Zhang, Lingmin and Li, Pengxiang", title="Problem-Based mHealth Literacy Scale (PB-mHLS): Development and Validation", journal="JMIR Mhealth Uhealth", year="2022", month="Apr", day="8", volume="10", number="4", pages="e31459", keywords="mobile health", keywords="mHealth literacy", keywords="instrument development", keywords="problem-based framework", abstract="Background: Mobile devices have greatly facilitated the use of digital health resources, particularly during the COVID-19 pandemic. Mobile health (mHealth) has become a common and important way to monitor and improve health conditions for people from different social classes. The ability to utilize mHealth affects its effectiveness; therefore, the widespread application of mHealth technologies calls for an instrument that can accurately measure health literacy in the era of mobile media. Objective: We aimed to (1) identify the components of mHealth literacy for ordinary users and (2) develop a systematic scale for appropriately measuring individuals' self-perceived mHealth literacy through a problem-based framework. Methods: We conducted an exploratory study involving in-depth interviews and observations (15 participants) in January 2020 and used exploratory factor analysis and confirmatory factor analysis to identify the components of mHealth literacy and develop an item pool. In February 2020, we conducted a pilot survey with 148 participants to explore the factor structures of items identified during the exploratory study. Subsequently, 2 surveys were administrated using quota sampling. 
The first survey (conducted in Guangdong, China) collected 552 responses during March 2020; we assessed composite reliability, convergent validity, and discriminant validity. The second survey (conducted in China nationwide) collected 433 responses during October 2021; we assessed criterion-related validity using structural equation modeling. Results: We identified 78 items during the exploratory study. The final scale---the Problem-Based mHealth Literacy Scale---consists of 33 items that reflect 8 domains of mHealth literacy. The first web-based survey suggested that mHealth literacy consists of 8 factors (ie, subscales), namely, mHealth desire, mobile phone operational skills, acquiring mHealth information, acquiring mHealth services, understanding of medical terms, mobile-based patient--doctor communication, evaluating mHealth information, and mHealth decision-making. These factors were found to be reliable (composite reliability >0.7), with good convergent validity (average variance extracted >0.5) and discriminant validity (square root of average variance extracted are greater than the correlation coefficients between factors). The findings also revealed that these 8 factors should be grouped under a second-order factor model ($\chi$2/df=2.701; comparative fit index 0.921; root mean square error of approximation 0.056; target coefficient 0.831). The second survey revealed that mHealth use had a significant impact ($\beta$=0.43, P<.001) on mHealth literacy and that mHealth literacy had a significant impact ($\beta$=0.23, P<.001) on health prevention behavior. Conclusions: This study revealed the distinctiveness of mHealth literacy by placing mHealth needs, the ability to understand medical terms, and the skills in patient--doctor interactions in the foreground. 
The Problem-Based mHealth Literacy Scale is a useful instrument for comprehensively measuring individuals' mHealth literacy and extends the concept of health literacy to the context of mobile communication. ", doi="10.2196/31459", url="https://mhealth.jmir.org/2022/4/e31459", url="http://www.ncbi.nlm.nih.gov/pubmed/35394446" } @Article{info:doi/10.2196/32777, author="Cheng, Christina and Elsworth, R. Gerald and Osborne, H. Richard", title="Validity Evidence of the eHealth Literacy Questionnaire (eHLQ) Part 2: Mixed Methods Approach to Evaluate Test Content, Response Process, and Internal Structure in the Australian Community Health Setting", journal="J Med Internet Res", year="2022", month="Mar", day="8", volume="24", number="3", pages="e32777", keywords="eHealth", keywords="health literacy", keywords="health equity", keywords="questionnaire design", keywords="validity evidence", keywords="eHLQ", keywords="mobile phone", abstract="Background: Digital technologies have changed how we manage our health, and eHealth literacy is needed to engage with health technologies. Any eHealth strategy would be ineffective if users' eHealth literacy needs are not addressed. A robust measure of eHealth literacy is essential for understanding these needs. On the basis of the eHealth Literacy Framework, which identified 7 dimensions of eHealth literacy, the eHealth Literacy Questionnaire (eHLQ) was developed. The tool has demonstrated robust psychometric properties in the Danish setting, but validity testing should be an ongoing and accumulative process. Objective: This study aims to evaluate validity evidence based on test content, response process, and internal structure of the eHLQ in the Australian community health setting. Methods: A mixed methods approach was used with cognitive interviewing conducted to examine evidence on test content and response process, whereas a cross-sectional survey was undertaken for evidence on internal structure. 
Data were collected at 3 diverse community health sites in Victoria, Australia. Psychometric testing included both the classical test theory and item response theory approaches. Methods included Bayesian structural equation modeling for confirmatory factor analysis, internal consistency and test-retest for reliability, and the Bayesian multiple-indicators, multiple-causes model for testing of differential item functioning. Results: Cognitive interviewing identified only 1 confusing term, which was clarified. All items were easy to read and understood as intended. A total of 525 questionnaires were included for psychometric analysis. All scales were homogenous with composite scale reliability ranging from 0.73 to 0.90. The intraclass correlation coefficient for test-retest reliability for the 7 scales ranged from 0.72 to 0.95. A 7-factor Bayesian structural equation modeling using small variance priors for cross-loadings and residual covariances was fitted to the data, and the model of interest produced a satisfactory fit (posterior predictive P=.49, 95\% CI for the difference between observed and replicated chi-square values --101.40 to 108.83, prior-posterior predictive P=.92). All items loaded on the relevant factor, with loadings ranging from 0.36 to 0.94. No significant cross-loading was found. There was no evidence of differential item functioning for administration format, site area, and health setting. However, discriminant validity was not well established for scales 1, 3, 5, 6, and 7. Item response theory analysis found that all items provided precise information at different trait levels, except for 1 item. All items demonstrated different sensitivity to different trait levels and represented a range of difficulty levels. Conclusions: The evidence suggests that the eHLQ is a tool with robust psychometric properties and further investigation of discriminant validity is recommended. 
It is ready to be used to identify eHealth literacy strengths and challenges and assist the development of digital health interventions to ensure that people with limited digital access and skills are not left behind. ", doi="10.2196/32777", url="https://www.jmir.org/2022/3/e32777", url="http://www.ncbi.nlm.nih.gov/pubmed/35258475" } @Article{info:doi/10.2196/28252, author="Marsall, Matthias and Engelmann, Gerrit and Skoda, Eva-Maria and Teufel, Martin and B{\"a}uerle, Alexander", title="Measuring Electronic Health Literacy: Development, Validation, and Test of Measurement Invariance of a Revised German Version of the eHealth Literacy Scale", journal="J Med Internet Res", year="2022", month="Feb", day="2", volume="24", number="2", pages="e28252", keywords="eHealth", keywords="eHeals", keywords="health literacy", keywords="factor analysis", keywords="validation", keywords="measurement invariance", keywords="internet", keywords="health information", abstract="Background: The World Wide Web has become an essential source of health information. Nevertheless, the amount and quality of information provided may lead to information overload. Therefore, people need certain skills to search for, identify, and evaluate information from the internet. In the context of health information, these competencies are summarized as the construct of eHealth literacy. Previous research has highlighted the relevance of eHealth literacy in terms of health-related outcomes. However, the existing instrument assessing eHealth literacy in the German language reveals methodological limitations regarding test development and validation. The development and validation of a revised scale for this important construct is highly relevant. Objective: The objective of this study was the development and validation of a revised German eHealth literacy scale. 
In particular, this study aimed to focus on high methodological and psychometric standards to provide a valid and reliable instrument for measuring eHealth literacy in the German language. Methods: Two internationally validated instruments were merged to cover a wide scope of the construct of eHealth literacy and create a revised eHealth literacy scale. Translation into the German language followed scientific guidelines and recommendations to ensure content validity. Data from German-speaking people (n=470) were collected in a convenience sample from October to November 2020. Validation was performed by factor analyses. Further, correlations were performed to examine convergent, discriminant, and criterion validity. Additionally, analyses of measurement invariance of gender, age, and educational level were conducted. Results: Analyses revealed a 2-factorial model of eHealth literacy. By item-reduction, the 2 factors information seeking and information appraisal were measured with 8 items reaching acceptable-to-good model fits (comparative fit index [CFI]: 0.942, Tucker Lewis index [TLI]: 0.915, root mean square error of approximation [RMSEA]: 0.127, and standardized root mean square residual [SRMR]: 0.055). Convergent validity was comprehensively confirmed by significant correlations of information seeking and information appraisal with health literacy, internet confidence, and internet anxiety. Discriminant and criterion validity were examined by correlation analyses with various scales and could partly be confirmed. Scalar level of measurement invariance for gender (CFI: 0.932, TLI: 0.923, RMSEA: 0.122, and SRMR: 0.068) and educational level (CFI: 0.937, TLI: 0.934, RMSEA: 0.112, and SRMR: 0.063) were confirmed. Measurement invariance of age was rejected. Conclusions: Following scientific guidelines for translation and test validation, we developed a revised German eHealth Literacy Scale (GR-eHEALS). Our factor analyses confirmed an acceptable-to-good model fit. 
Construct validation in terms of convergent, discriminant, and criterion validity could mainly be confirmed. Our findings provide evidence for measurement invariance of the instrument regarding gender and educational level. The newly revised GR-eHEALS questionnaire represents a valid instrument to measure the important health-related construct eHealth literacy. ", doi="10.2196/28252", url="https://www.jmir.org/2022/2/e28252", url="http://www.ncbi.nlm.nih.gov/pubmed/35107437" } @Article{info:doi/10.2196/32855, author="Chen, Yu-Chi and Cheng, Christina and Osborne, H. Richard and Kayser, Lars and Liu, Chieh-Yu and Chang, Li-Chun", title="Validity Testing and Cultural Adaptation of the eHealth Literacy Questionnaire (eHLQ) Among People With Chronic Diseases in Taiwan: Mixed Methods Study", journal="J Med Internet Res", year="2022", month="Jan", day="19", volume="24", number="1", pages="e32855", keywords="chronic illness", keywords="eHealth literacy questionnaire", keywords="eHLQ", keywords="validation", keywords="cultural adaptation", keywords="eHealth", abstract="Background: Advancements in digital technologies seek to promote health and access to services. However, people lacking abilities and confidence to use technology are likely to be left behind, leading to health disparities. In providing digital health services, health care providers need to be aware of users' diverse electronic health (eHealth) literacy to address their particular needs and ensure equitable uptake and use of digital services. To understand such needs, an instrument that captures users' knowledge, skills, trust, motivation, and experiences in relation to technology is required. The eHealth Literacy Questionnaire (eHLQ) is a multidimensional tool with 7 scales covering diverse dimensions of eHealth literacy. The tool was simultaneously developed in English and Danish using a grounded and validity-driven approach and has been shown to have strong psychometric properties. 
Objective: This study aims to translate and culturally adapt the eHLQ for application among Mandarin-speaking people with chronic diseases in Taiwan and then undertake a rigorous set of validity-testing procedures. Methods: The cross-cultural adaptation of the eHLQ included translation and evaluation of the translations. The measurement properties were assessed using classical test theory and item response theory (IRT) approaches. Content validity, known-group validity, and internal consistency were explored, as well as item characteristic curves (ICCs), item discrimination, and item location/difficulty. Results: The adapted version was reviewed, and a recommended forward translation was confirmed through consensus. The tool exhibited good content validity. A total of 420 people with 1 or more chronic diseases participated in a validity-testing survey. The eHLQ showed good internal consistency (Cronbach $\alpha$=.75-.95). For known-group validity, all 7 eHLQ scales showed strong associations with education. Unidimensionality and local independence assumptions were met except for scale 2. IRT analysis showed that all items demonstrated good discrimination (range 0.27-12.15) and a good range of difficulty (range 0.59-1.67) except for 2 items in scale 7. Conclusions: Using a rigorous process, the eHLQ was translated from English into a culturally appropriate tool for use in the Mandarin language. Validity testing provided evidence of satisfactory-to-strong psychometric properties of the eHLQ. The 7 scales are likely to be useful research tools for evaluating digital health interventions and for informing the development of health technology products and interventions that equitably suit diverse users' needs. 
", doi="10.2196/32855", url="https://www.jmir.org/2022/1/e32855", url="http://www.ncbi.nlm.nih.gov/pubmed/35044310" } @Article{info:doi/10.2196/30092, author="Alwasel, Athary and Stergioulas, Lampros and Fakhimi, Masoud and Garn, Wolfgang", title="Assessing Patient Engagement in Health Care: Proposal for a Modeling and Simulation Framework for Behavioral Analysis", journal="JMIR Res Protoc", year="2021", month="Dec", day="8", volume="10", number="12", pages="e30092", keywords="modeling and simulation", keywords="behavioral analysis", keywords="patient engagement", keywords="behavioral factors, health care", keywords="human factors", keywords="outcomes", keywords="patient health", keywords="health policy", keywords="chronic diseases", keywords="behavioral model", abstract="International Registered Report Identifier (IRRID): PRR1-10.2196/30092 ", doi="10.2196/30092", url="https://www.researchprotocols.org/2021/12/e30092", url="http://www.ncbi.nlm.nih.gov/pubmed/34889774" } @Article{info:doi/10.2196/11055, author="Mcgeough, Julienne and Gallagher-Mitchell, Thomas and Clark, Andrew Dan Philip and Harrison, Neil", title="Reliability and Confirmatory Factor Analysis (CFA) of a Paper- Versus App-Administered Resilience Scale in Scottish Youths: Comparative Study", journal="JMIR Mhealth Uhealth", year="2021", month="Dec", day="7", volume="9", number="12", pages="e11055", keywords="resilience", keywords="psychometrics", keywords="app administration", keywords="cyberpsychology", abstract="Background: Adequately measuring resilience is important to support young people and children who may need to access resources through social work or educational settings. A widely accepted measure of youth resilience has been developed previously and has been shown to be suitable for vulnerable youth. While the measure is completed by the young person on paper, it has been designed to be worked through with a teacher or social worker in case further clarification is required. 
However, this method is time consuming and, when faced with large groups of pupils who need assessment, can be overwhelming for schools and practitioners. This study assesses app software with a built-in avatar that can guide young persons through the assessment and its interpretation. Objective: Our primary objective is to compare the reliability and psychometric properties of a mobile software app to a paper version of the Child and Youth Resilience measure (CYRM-28). Second, this study assesses the use of the CYRM-28 in a Scottish youth population (aged 11-18 years). Methods: Following focus groups and discussion with teachers, social workers, and young people, an avatar was developed by a software company and integrated into an android smartphone app designed to ask questions via the device's inbuilt text-to-voice engine. In total, 714 students from 2 schools in North East Scotland completed either a paper version or app version of the CYRM-28. A cross-sectional design was used, and students completed their allocated version twice, with a 2-week period in between each testing. All participants could request clarification either from a guidance teacher (paper version) or from the in-built software glossary (app version). Results: Test and retest correlations showed that the app version performed better than the paper version of the questionnaire (paper version: r303=0.81; P<.001; 95\% CI 0.77-0.85; app version: r413=0.84; P<.001; 95\% CI 0.79-0.89). Fisher r to z transformation revealed a significant difference in the correlations (Z=--2.97, P<.01). Similarly, Cronbach $\alpha$ in both conditions was very high (app version: $\alpha$=.92; paper version: $\alpha$=.87), suggesting item redundancy. Ordinarily, this would lead to a possible removal of highly correlated items; however, our primary objective was to compare app delivery methods over a pen-and-paper mode and was hence beyond the scope of the study. 
Fisher r to z transformation revealed a significant difference in the correlations (Z=--3.69, P<.01). A confirmatory factor analysis supported the 3-factor solution (individual, relational, and contextual) and reported a good model fit ($\chi^2_{15}$=27.6 [n=541], P=.24). Conclusions: ALEX, an avatar with an integrated voice guide, had higher reliability when measuring resilience than a paper version with teacher assistance. The CFA reports similar structure using the avatar when compared against the original validation. ", doi="10.2196/11055", url="https://mhealth.jmir.org/2021/12/e11055", url="http://www.ncbi.nlm.nih.gov/pubmed/34878995" } @Article{info:doi/10.2196/28782, author="van Barneveld, Esther and Lim, Arianne and van Hanegem, Nehalennia and Vork, Lisa and Herrewegh, Alexandra and van Poll, Mikal and Manders, Jessica and van Osch, Frits and Spaans, Wilbert and van Koeveringe, Gommert and Vrijens, Desiree and Kruimel, Joanna and Bongers, Marlies and Leue, Carsten", title="Patient-Reported Outcome Measure for Real-time Symptom Assessment in Women With Endometriosis: Focus Group Study", journal="JMIR Form Res", year="2021", month="Dec", day="3", volume="5", number="12", pages="e28782", keywords="endometriosis", keywords="pelvic pain", keywords="positive affect", keywords="negative affect", keywords="patient-reported outcome measure", keywords="focus groups", keywords="experience sampling method", keywords="momentary symptom assessment", keywords="mobile phone", abstract="Background: Symptoms related to endometriosis have a significant impact on the quality of life, and symptoms often recur. The experience sampling method (ESM), a digital questioning method characterized by randomly repeated momentary assessments, has several advantages over traditionally used measurements, including the ability to assess the temporal relationship between variables such as physical, mental, and social factors. 
Objective: The aim of this study is to develop an ESM tool for patients with endometriosis to accurately measure symptoms and their course over time, allowing for personalized treatment and adequate monitoring of treatment efficacy in individual patients. Methods: On the basis of international guidelines, items from validated questionnaires were selected through a literature review and during focus groups and multidisciplinary expert meetings. Data analysis was conducted using ATLAS.ti (ATLAS.ti Scientific Software Development GmbH). The feasibility and usability of the newly developed momentary assessment tool were tested for 28 consecutive days in 5 patients with endometriosis-related pain symptoms. Results: Momentary assessment items contained questions concerning endometriosis symptoms, general somatic symptoms, psychological symptoms, contextual information, and the use of food and medication. A morning questionnaire on sleep and sexuality was included. In a pilot study, the patients considered the tool easy to use but time consuming. The average compliance rate of momentary assessments was 37.8\% (106/280), with the highest completion rate during the first week (39/70, 56\%). Therefore, it is advisable to use the ESM for a maximum of 7 days. Conclusions: A new digital tool for endometriosis symptom assessment was developed using the ESM, which may help overcome the limitations of current retrospective questionnaires. After validation and testing, future studies will be planned to evaluate the use of this tool in a clinical setting in order to propose a personalized treatment plan for women with endometriosis. ", doi="10.2196/28782", url="https://formative.jmir.org/2021/12/e28782", url="http://www.ncbi.nlm.nih.gov/pubmed/34870608" } @Article{info:doi/10.2196/22390, author="Coumans, J. Juul M. and Oenema, Anke and Bolman, W. Catherine A. 
and Lechner, Lilian", title="Use and Appreciation of a Web-Based, Computer-Tailored Diet and Physical Activity Intervention Based on the Self-determination Theory: Evaluation Study of Process and Predictors", journal="JMIR Form Res", year="2021", month="Dec", day="2", volume="5", number="12", pages="e22390", keywords="diet", keywords="physical activity", keywords="eHealth", keywords="self-determination theory", keywords="motivational interviewing", keywords="process evaluation", keywords="nonusage attrition", abstract="Background: eHealth is a promising tool for promoting lifestyle behaviors such as a healthy diet and physical activity (PA). However, making people use interventions is a crucial and challenging problem in eHealth. More insight into use patterns and predicting factors is needed to improve future interventions. Objective: This study aims to examine the use, predictors of use, and appreciation of a web-based, computer-tailored, dietary and PA promotion intervention, MyLifestyleCoach, which is based on the self-determination theory. First, we depict the participants' flow in the intervention and identify moments when they are likely to discontinue use. Second, we investigate whether demographic, motivational, and program-related characteristics predict the use of several intervention elements. Finally, we report the appreciation scores for the intervention and the participant and program characteristics associated with these scores. Methods: This study was based on data from web-based self-report questionnaires. Here, objectively assessed intervention use data were analyzed from participants randomized to the intervention condition. Multiple stepwise (logistic) regression analyses were conducted to examine the predictors of intervention use and evaluation scores. 
Results: Our findings indicate a low full completion rate for the intervention among those who chose and completed the diet module (49/146, 33.6\%), the PA module (2/12, 17\%), and both modules (58/273, 21.2\%). Several points in the intervention where participants were likely to stop using the intervention were identified. Autonomous and intrinsic motivation toward diet were related to the completion of the initial sessions of the intervention (ie, the opening session in which participants could choose which module to follow and the first session of the diet module). In contrast, controlled motivation was linked to the completion of both modules (initial and follow-up sessions). Appreciation scores were somewhat positive. Appreciation was predicted by several motivational constructs, such as amotivation and basic psychological needs (eg, competence) and program-related features (eg, number of completed sessions). Conclusions: This study adds meaningful information on the use and appreciation of a web-based, computer-tailored dietary and PA intervention, MyLifestyleCoach. The results indicate that different types of motivations, such as extrinsic and intrinsic motivation, are at play at the points when people are likely to stop using the intervention. The intervention was appreciated fairly well, and several motivational constructs and fulfillment of basic psychological needs were associated with appreciation. Practical implications of these findings have been provided in this study. 
", doi="10.2196/22390", url="https://formative.jmir.org/2021/12/e22390", url="http://www.ncbi.nlm.nih.gov/pubmed/34860670" } @Article{info:doi/10.2196/27873, author="Rego, Nazar{\'e} and Pereira, Silva Helena and Crispim, Jos{\'e}", title="Perceptions of Patients and Physicians on Teleconsultation at Home for Diabetes Mellitus: Survey Study", journal="JMIR Hum Factors", year="2021", month="Nov", day="23", volume="8", number="4", pages="e27873", keywords="teleconsultation", keywords="diabetes mellitus", keywords="telemedicine", keywords="eHealth", keywords="mobile phone", abstract="Background: Diabetes mellitus (DM) is one of the most challenging diseases in the 21st century and is the sixth leading cause of death. Telemedicine has increasingly been implemented in the care of patients with DM. Although teleconsultations at home have shown to be more effective for inducing HbA1c reduction than other telemedicine options, before the 2019 coronavirus disease crisis, their use had been lagging behind. Studies on physicians' or patients' perceptions about telemedicine have been performed independently of each other, and very few have focused on teleconsultations. In a time of great pressure for health systems and when an important portion of health care has to be assured at a distance, obtaining insights about teleconsultations at home from the stakeholders directly involved in the health care interaction is particularly important. Objective: The perceptions of patients and physicians about their intentions to use home synchronous teleconsultations for DM care are examined to identify drivers and barriers inherent to programs that involve home teleconsultations. Methods: Two identical questionnaires integrating the technology acceptance model and the unified theory of acceptance and use of technology and assessing the confidence in information and communication technology use of patients and physicians were developed. 
Responses by patients (n=75) and physicians (n=68) were analyzed using canonical correlation analysis. Results: Associations between predictor constructs (performance, effort, social influence, facilitating conditions, and attitude) and intention to use yielded significant functions, with a canonical R2 of 0.95 (for physicians) and 0.98 (patients). The main identified barriers to patient intention to use were the expected effort to explain the medical problem, and privacy and confidentiality issues. The major drivers were the facilitation of contact with the physician, which is beneficial to patient disease management and treatment, time savings, and reciprocity concerning physicians' willingness to perform teleconsultations. Responses from physicians revealed an association between intention to use and the expected performance of home teleconsultations. The major barrier to intention to use expressed in physicians' answers was doubts concerning the quality of patient examination. The major drivers were time savings, productivity increases, improvements in patient's health and patient management, National Health System costs reduction, and reciprocity relative to patients' willingness to engage in teleconsultations. Conclusions: To promote the use of home teleconsultations for DM, decision makers should improve patients' health literacy so the physician--patient communication is more effective; explore information and communication technology developments to reduce current limitations of non--face-to-face examinations; ensure patient privacy and data confidentiality; and demonstrate the capabilities of home teleconsultations to physicians. 
", doi="10.2196/27873", url="https://humanfactors.jmir.org/2021/4/e27873", url="http://www.ncbi.nlm.nih.gov/pubmed/34817394" } @Article{info:doi/10.2196/30644, author="Lee, Jiyeon and Lee, Eun-Hyun and Chae, Duckhee", title="eHealth Literacy Instruments: Systematic Review of Measurement Properties", journal="J Med Internet Res", year="2021", month="Nov", day="15", volume="23", number="11", pages="e30644", keywords="eHealth literacy", keywords="systematic review", keywords="meta-analysis", keywords="psychometrics", keywords="reliability", keywords="validity", keywords="scale", keywords="instrument", abstract="Background: The internet is now a major source of health information. With the growth of internet users, eHealth literacy has emerged as a new concept for digital health care. Therefore, health professionals need to consider the eHealth literacy of consumers when providing care utilizing digital health technologies. Objective: This study aimed to identify currently available eHealth literacy instruments and evaluate their measurement properties to provide robust evidence to researchers and clinicians who are selecting an eHealth literacy instrument. Methods: We conducted a systematic review and meta-analysis of self-reported eHealth literacy instruments by applying the updated COSMIN (COnsensus-based Standards for the selection of health Measurement INstruments) methodology. Results: This study included 7 instruments from 41 articles describing 57 psychometric studies, as identified in 4 databases (PubMed, CINAHL, Embase, and PsycInfo). No eHealth literacy instrument provided evidence for all measurement properties. The eHealth literacy scale (eHEALS) was originally developed with a single-factor structure under the definition of eHealth literacy before the rise of social media and the mobile web. That instrument was evaluated in 18 different languages and 26 countries, involving diverse populations. 
However, various other factor structures were exhibited: 7 types of two-factor structures, 3 types of three-factor structures, and 1 bifactor structure. The transactional eHealth literacy instrument (TeHLI) was developed to reflect the broader concept of eHealth literacy and was demonstrated to have a sufficient low-quality and very low-quality evidence for content validity (relevance, comprehensiveness, and comprehensibility) and sufficient high-quality evidence for structural validity and internal consistency; however, that instrument has rarely been evaluated. Conclusions: The eHealth literacy scale was the most frequently investigated instrument. However, it is strongly recommended that the instrument's content be updated to reflect recent advancements in digital health technologies. In addition, the transactional eHealth literacy instrument needs improvements in content validity and further psychometric studies to increase the credibility of its synthesized evidence. ", doi="10.2196/30644", url="https://www.jmir.org/2021/11/e30644", url="http://www.ncbi.nlm.nih.gov/pubmed/34779781" } @Article{info:doi/10.2196/27706, author="Cilia, Federica and Carette, Romuald and Elbattah, Mahmoud and Dequen, Gilles and Gu{\'e}rin, Jean-Luc and Bosche, J{\'e}r{\^o}me and Vandromme, Luc and Le Driant, Barbara", title="Computer-Aided Screening of Autism Spectrum Disorder: Eye-Tracking Study Using Data Visualization and Deep Learning", journal="JMIR Hum Factors", year="2021", month="Oct", day="25", volume="8", number="4", pages="e27706", keywords="autism spectrum disorder", keywords="screening", keywords="eye tracking", keywords="data visualization", keywords="machine learning", keywords="deep learning", keywords="AI", keywords="ASS", keywords="artificial intelligence", keywords="ML", keywords="adolescent", keywords="diagnosis", abstract="Background: The early diagnosis of autism spectrum disorder (ASD) is highly desirable but remains a challenging task, which requires a set of 
cognitive tests and hours of clinical examinations. In addition, variations of such symptoms exist, which can make the identification of ASD even more difficult. Although diagnosis tests are largely developed by experts, they are still subject to human bias. In this respect, computer-assisted technologies can play a key role in supporting the screening process. Objective: This paper follows on the path of using eye tracking as an integrated part of screening assessment in ASD based on the characteristic elements of the eye gaze. This study adds to the mounting efforts in using eye tracking technology to support the process of ASD screening. Methods: The proposed approach basically aims to integrate eye tracking with visualization and machine learning. A group of 59 school-aged participants took part in the study. The participants were invited to watch a set of age-appropriate photographs and videos related to social cognition. Initially, eye-tracking scanpaths were transformed into a visual representation as a set of images. Subsequently, a convolutional neural network was trained to perform the image classification task. Results: The experimental results demonstrated that the visual representation could simplify the diagnostic task and also attained high accuracy. Specifically, the convolutional neural network model could achieve a promising classification accuracy. This largely suggests that visualizations could successfully encode the information of gaze motion and its underlying dynamics. Further, we explored possible correlations between the autism severity and the dynamics of eye movement based on the maximal information coefficient. The findings primarily show that the combination of eye tracking, visualization, and machine learning have strong potential in developing an objective tool to assist in the screening of ASD. 
Conclusions: Broadly speaking, the approach we propose could be transferable to screening for other disorders, particularly neurodevelopmental disorders. ", doi="10.2196/27706", url="https://humanfactors.jmir.org/2021/4/e27706", url="http://www.ncbi.nlm.nih.gov/pubmed/34694238" } @Article{info:doi/10.2196/26675, author="Tahri Sqalli, Mohammed and Al-Thani, Dena and Elshazly, B. Mohamed and Al-Hijji, Mohammed", title="Interpretation of a 12-Lead Electrocardiogram by Medical Students: Quantitative Eye-Tracking Approach", journal="JMIR Med Educ", year="2021", month="Oct", day="14", volume="7", number="4", pages="e26675", keywords="eye tracking", keywords="electrocardiogram", keywords="ECG interpretation", keywords="medical education", keywords="human-computer interaction", keywords="medical student", keywords="eye", keywords="tracking", keywords="interpretation", keywords="ECG", abstract="Background: Accurate interpretation of a 12-lead electrocardiogram (ECG) demands high levels of skill and expertise. Early training in medical school plays an important role in building the ECG interpretation skill. Thus, understanding how medical students perform the task of interpretation is important for improving this skill. Objective: We aimed to use eye tracking as a tool to research how eye fixation can be used to gain a deeper understanding of how medical students interpret ECGs. Methods: In total, 16 medical students were recruited to interpret 10 different ECGs each. Their eye movements were recorded using an eye tracker. Fixation heatmaps of where the students looked were generated from the collected data set. Statistical analysis was conducted on the fixation count and duration using the Mann-Whitney U test and the Kruskal-Wallis test. Results: The average percentage of correct interpretations was 55.63\%, with an SD of 4.63\%. 
After analyzing the average fixation duration, we found that medical students study the three lower leads (rhythm strips) the most using a top-down approach: lead II (mean=2727 ms, SD=456), followed by leads V1 (mean=1476 ms, SD=320) and V5 (mean=1301 ms, SD=236). We also found that medical students develop a personal system of interpretation that adapts to the nature and complexity of the diagnosis. In addition, we found that medical students consider some leads as their guiding point toward finding a hint leading to the correct interpretation. Conclusions: The use of eye tracking successfully provides a quantitative explanation of how medical students learn to interpret a 12-lead ECG. ", doi="10.2196/26675", url="https://mededu.jmir.org/2021/4/e26675", url="http://www.ncbi.nlm.nih.gov/pubmed/34647899" } @Article{info:doi/10.2196/31627, author="Liu, Hua-Xuan and Chow, Bik-Chu and Liang, Wei and Hassel, Holger and Huang, Wendy YaJun", title="Measuring a Broad Spectrum of eHealth Skills in the Web 3.0 Context Using an eHealth Literacy Scale: Development and Validation Study", journal="J Med Internet Res", year="2021", month="Sep", day="23", volume="23", number="9", pages="e31627", keywords="eHealth literacy", keywords="scale development", keywords="validation", keywords="college students", abstract="Background: eHealth literacy (EHL) refers to a variety of capabilities that enable individuals to obtain health information from electronic resources and apply it to solve health problems. With the digitization of health care and the wide availability of health apps, a more diverse range of eHealth skills is required to properly use such health facilities. Existing EHL measurements focus mainly on the skill of obtaining health information (Web 1.0), whereas skills for web-based interactions (Web 2.0) and self-managing health data and applying information (Web 3.0) have not been well measured. 
Objective: This study aims to develop an EHL scale (eHLS) termed eHLS-Web3.0 comprising a comprehensive spectrum of Web 1.0, 2.0, and 3.0 skills to measure EHL, and evaluate its validity and reliability along with the measurement invariance among college students. Methods: In study 1, 421 Chinese college students (mean age 20.5, SD 1.4 years; 51.8\% female) and 8 health experts (mean age 38.3, SD 5.9 years; 87.5\% female) were involved to develop the eHLS-Web3.0. The scale development included three steps: item pool generation, content validation, and exploratory factor analysis. In study 2, 741 college students (mean age 21.3, SD 1.4 years; 52.2\% female) were recruited from 4 Chinese cities to validate the newly developed eHLS-Web3.0. The construct validity, convergent validity, concurrent validity, internal consistency reliability, test-retest reliability, and measurement invariance across genders, majors, and regions were examined by a series of statistical analyses, including confirmatory factor analysis (CFA) and multigroup CFAs using SPSS and Mplus software packages. Results: Based on the item pool of 374 statements collected during the conceptual development, 24 items (4-10 items per subscale) were generated and adjusted after cognitive testing and content validity examination. Through exploratory factor analysis, a 3-factor eHLS-Web3.0 was finally developed, and it included acquisition (8 items), verification (6 items), and application (10 items). In study 2, CFAs supported the construct validity of the 24-item 3D eHLS-Web3.0 ($\chi^2_{244}$=903.076, $\chi^2$/df=3.701, comparative fit index=0.924, Tucker-Lewis index=0.914, root mean square error of approximation [RMSEA]=0.06, and standardized root mean residual [SRMR]=0.051). The average variance extracted (AVE) value of 0.58 and high correlation between eHLS-Web3.0 subscales and the eHealth Literacy Scale (r=0.725-0.880, P<.001) indicated the convergent validity and concurrent validity of the eHLS-Web3.0. 
The results also indicated satisfactory internal consistency reliability ($\alpha$=.976, $\rho$=0.934-0.956) and test-retest reliability (r=0.858, P<.001) of the scale. Multigroup CFA demonstrated the 24-item eHLS-Web3.0 to be invariant at all configural, metric, strength, and structural levels across genders (female and male), majors (sport-related, medical, and general), and regions (Yinchuan, Kunming, Xiamen, and Beijing). Conclusions: The 24-item 3D eHLS-Web3.0 proved to be a reliable and valid measurement tool for EHL in the Web 3.0 context among Chinese college students. ", doi="10.2196/31627", url="https://www.jmir.org/2021/9/e31627", url="http://www.ncbi.nlm.nih.gov/pubmed/34554098" } @Article{info:doi/10.2196/19245, author="Domingos, C{\'e}lia and Costa, Soares Patr{\'i}cio and Santos, Correia Nadine and P{\^e}go, Miguel Jos{\'e}", title="European Portuguese Version of the User Satisfaction Evaluation Questionnaire (USEQ): Transcultural Adaptation and Validation Study", journal="JMIR Mhealth Uhealth", year="2021", month="Jun", day="29", volume="9", number="6", pages="e19245", keywords="satisfaction", keywords="usability", keywords="reliability", keywords="validity", keywords="seniors", keywords="elderly", keywords="technology", keywords="wearables", abstract="Background: Wearable activity trackers have the potential to encourage users to adopt healthier lifestyles by tracking daily health information. However, usability is a critical factor in technology adoption. Older adults may be more resistant to accepting novel technologies. Understanding the difficulties that older adults face when using activity trackers may be useful for implementing strategies to promote their use. Objective: The purpose of this study was to conduct a transcultural adaptation of the User Satisfaction Evaluation Questionnaire (USEQ) into European Portuguese and validate the adapted questionnaire. 
Additionally, we aimed to provide information about older adults' satisfaction regarding the use of an activity tracker (Xiaomi Mi Band 2). Methods: The USEQ was translated following internationally accepted guidelines. The psychometric evaluation of the final version of the translated USEQ was assessed based on structural validity using exploratory and confirmatory factor analyses. Construct validity was examined using divergent and discriminant validity analysis, and internal consistency was evaluated using Cronbach $\alpha$ and McDonald $\omega$ coefficients. Results: A total of 110 older adults completed the questionnaire. Confirmatory factor analysis supported the conceptual unidimensionality of the USEQ ($\chi^2_{4}$=7.313, P=.12, comparative fit index=0.973, Tucker-Lewis index=0.931, goodness of fit index=0.977, root mean square error of approximation=0.087, standardized root mean square residual=0.038). The internal consistency showed acceptable reliability (Cronbach $\alpha$=.677, McDonald $\omega$=0.722). Overall, 90\% of the participants reported excellent satisfaction with the Xiaomi Mi Band 2. Conclusions: The findings support the use of this translated USEQ as a valid and reliable tool for measuring user satisfaction with wearable activity trackers in older adults, with psychometric properties consistent with the original version. 
", doi="10.2196/19245", url="https://mhealth.jmir.org/2021/6/e19245", url="http://www.ncbi.nlm.nih.gov/pubmed/34185018" } @Article{info:doi/10.2196/25310, author="Hwang, Sung Ho and Choi, Seong-Youl", title="Development of an Android-Based Self-Report Assessment for Elderly Driving Risk (SAFE-DR) App: Mixed Methods Study", journal="JMIR Mhealth Uhealth", year="2021", month="Jun", day="17", volume="9", number="6", pages="e25310", keywords="Android driving app", keywords="driving safety", keywords="reliability", keywords="self-assessment", keywords="validity", keywords="mHealth", keywords="driving", abstract="Background: Self-report assessments for elderly drivers are used in various countries for accessible, widespread self-monitoring of driving ability in the elderly population. Likewise, in South Korea, a paper-based Self-Report Assessment for Elderly Driving Risk (SAFE-DR) has been developed. Here, we implemented the SAFE-DR through an Android app, which provides the advantages of accessibility, convenience, and provision of diverse information, and verified its reliability and validity. Objective: This study tested the validity and reliability of a mobile app-based version of a self-report assessment for elderly persons contextualized to the South Korean culture and compared it with a paper-based test. Methods: In this mixed methods study, we recruited and interviewed 567 elderly drivers (aged 65 years and older) between August 2018 and May 2019. For participants who provided consent, the app-based test was repeated after 2 weeks and an additional paper-based test (Driver 65 Plus test) was administered. Using the collected data, we analyzed the reliability and validity of the app-based SAFE-DR. The internal consistency of provisional items in each subdomain of the SAFE-DR and the test-retest stability were analyzed to examine reliability. Exploratory factor analysis was performed to examine the validity of the subdomain configuration. 
To verify the appropriateness of using an app-based test for older drivers possibly unfamiliar with mobile technology, the correlation between the results of the SAFE-DR app and the paper-based offline test was also analyzed. Results: In the reliability analysis, Cronbach $\alpha$ for all items was 0.975 and the correlation of each item with the overall score ranged from r=0.520 to r=0.823; 4 items with low correlations were removed from each of the subdomains. In the retest after 2 weeks, the mean correlation coefficient across all items was r=0.951, showing very high reliability. Exploratory factor analysis on 40 of the 44 items established 5 subdomains: on-road (8 items), coping (16 items), cognitive functions (5 items), general conditions (8 items), and medical health (3 items). A very strong negative correlation of --0.864 was observed between the total score for the app-based SAFE-DR and the paper-based Driver 65 Plus with decorrelation scales. The app-based test was found to be reliable. Conclusions: In this study, we developed an app-based self-report assessment tool for elderly drivers and tested its reliability and validity. This app can help elderly individuals easily assess their own driving skills. Therefore, this assessment can be used to educate drivers and for preventive screening for elderly drivers who want to renew their driver's licenses in South Korea. In addition, the app can contribute to safe driving among elderly drivers. 
", doi="10.2196/25310", url="https://mhealth.jmir.org/2021/6/e25310", url="http://www.ncbi.nlm.nih.gov/pubmed/33934068" } @Article{info:doi/10.2196/25218, author="Helou, Samar and Abou-Khalil, Victoria and Iacobucci, Riccardo and El Helou, Elie and Kiyono, Ken", title="Automatic Classification of Screen Gaze and Dialogue in Doctor-Patient-Computer Interactions: Computational Ethnography Algorithm Development and Validation", journal="J Med Internet Res", year="2021", month="May", day="10", volume="23", number="5", pages="e25218", keywords="computational ethnography", keywords="patient-physician communication", keywords="doctor-patient-computer interaction", keywords="electronic medical records", keywords="pose estimation", keywords="gaze", keywords="voice activity", keywords="dialogue", keywords="clinic layout", abstract="Background: The study of doctor-patient-computer interactions is a key research area for examining doctor-patient relationships; however, studying these interactions is costly and obtrusive as researchers usually set up complex mechanisms or intrude on consultations to collect, then manually analyze the data. Objective: We aimed to facilitate human-computer and human-human interaction research in clinics by providing a computational ethnography tool: an unobtrusive automatic classifier of screen gaze and dialogue combinations in doctor-patient-computer interactions. Methods: The classifier's input is video taken by doctors using their computers' internal camera and microphone. By estimating the key points of the doctor's face and the presence of voice activity, we estimate the type of interaction that is taking place. 
The classification output of each video segment is 1 of 4 interaction classes: (1) screen gaze and dialogue, wherein the doctor is gazing at the computer screen while conversing with the patient; (2) dialogue, wherein the doctor is gazing away from the computer screen while conversing with the patient; (3) screen gaze, wherein the doctor is gazing at the computer screen without conversing with the patient; and (4) other, wherein no screen gaze or dialogue are detected. We evaluated the classifier using 30 minutes of video provided by 5 doctors simulating consultations in their clinics both in semi- and fully inclusive layouts. Results: The classifier achieved an overall accuracy of 0.83, a performance similar to that of a human coder. Similar to the human coder, the classifier was more accurate in fully inclusive layouts than in semi-inclusive layouts. Conclusions: The proposed classifier can be used by researchers, care providers, designers, medical educators, and others who are interested in exploring and answering questions related to screen gaze and dialogue in doctor-patient-computer interactions. ", doi="10.2196/25218", url="https://www.jmir.org/2021/5/e25218", url="http://www.ncbi.nlm.nih.gov/pubmed/33970117" } @Article{info:doi/10.2196/15032, author="Witteman, O. Holly and Vaisson, Gratianne and Provencher, Thierry and Chipenda Dansokho, Selma and Colquhoun, Heather and Dugas, Michele and Fagerlin, Angela and Giguere, MC Anik and Haslett, Lynne and Hoffman, Aubri and Ivers, M. Noah and L{\'e}gar{\'e}, France and Trottier, Marie-Eve and Stacey, Dawn and Volk, J. 
Robert and Renaud, Jean-S{\'e}bastien", title="An 11-Item Measure of User- and Human-Centered Design for Personal Health Tools (UCD-11): Development and Validation", journal="J Med Internet Res", year="2021", month="Mar", day="16", volume="23", number="3", pages="e15032", keywords="patient-centered care", keywords="patient participation", keywords="health services research", keywords="validation studies as topic", keywords="surveys and questionnaires", keywords="humans", keywords="user-centred design, human-centred design", keywords="user-centered design", keywords="human-centered design", keywords="co-design", keywords="instrument", keywords="scale", keywords="index", keywords="patient and public involvement", abstract="Background: Researchers developing personal health tools employ a range of approaches to involve prospective users in design and development. Objective: The aim of this paper was to develop a validated measure of the human- or user-centeredness of design and development processes for personal health tools. Methods: We conducted a psychometric analysis of data from a previous systematic review of the design and development processes of 348 personal health tools. Using a conceptual framework of user-centered design, our team of patients, caregivers, health professionals, tool developers, and researchers analyzed how specific practices in tool design and development might be combined and used as a measure. We prioritized variables according to their importance within the conceptual framework and validated the resultant measure using principal component analysis with Varimax rotation, classical item analysis, and confirmatory factor analysis. Results: We retained 11 items in a 3-factor structure explaining 68\% of the variance in the data. The Cronbach alpha was .72. Confirmatory factor analysis supported our hypothesis of a latent construct of user-centeredness. 
Items were whether or not: (1) patient, family, caregiver, or surrogate users were involved in the steps that help tool developers understand users or (2) develop a prototype, (3) asked their opinions, (4) observed using the tool or (5) involved in steps intended to evaluate the tool, (6) the process had 3 or more iterative cycles, (7) changes between cycles were explicitly reported, (8) health professionals were asked their opinion and (9) consulted before the first prototype was developed or (10) between initial and final prototypes, and (11) a panel of other experts was involved. Conclusions: The User-Centered Design 11-item measure (UCD-11) may be used to quantitatively document the user/human-centeredness of design and development processes of patient-centered tools. By building an evidence base about such processes, we can help ensure that tools are adapted to people who will use them, rather than requiring people to adapt to tools. ", doi="10.2196/15032", url="https://www.jmir.org/2021/3/e15032", url="http://www.ncbi.nlm.nih.gov/pubmed/33724194" } @Article{info:doi/10.2196/26360, author="Jones, Chelsea and Harasym, Jessica and Miguel-Cruz, Antonio and Chisholm, Shannon and Smith-MacDonald, Lorraine and Br{\'e}mault-Phillips, Suzette", title="Neurocognitive Assessment Tools for Military Personnel With Mild Traumatic Brain Injury: Scoping Literature Review", journal="JMIR Ment Health", year="2021", month="Feb", day="22", volume="8", number="2", pages="e26360", keywords="military", keywords="rehabilitation", keywords="head injury", keywords="posttraumatic stress disorder", keywords="cognition", keywords="neurocognitive assessment tool", keywords="traumatic brain injury", keywords="assessment", keywords="brain concussion", keywords="mobile phone", abstract="Background: Mild traumatic brain injury (mTBI) occurs at a higher frequency among military personnel than among civilians. A common symptom of mTBIs is cognitive dysfunction. 
Health care professionals use neuropsychological assessments as part of a multidisciplinary and best practice approach for mTBI management. Such assessments support clinical diagnosis, symptom management, rehabilitation, and return-to-duty planning. Military health care organizations currently use computerized neurocognitive assessment tools (NCATs). NCATs and more traditional neuropsychological assessments present unique challenges in both clinical and military settings. Many research gaps remain regarding psychometric properties, usability, acceptance, feasibility, effectiveness, sensitivity, and utility of both types of assessments in military environments. Objective: The aims of this study were to explore evidence regarding the use of NCATs among military personnel who have sustained mTBIs; evaluate the psychometric properties of the most commonly tested NCATs for this population; and synthesize the data to explore the range and extent of NCATs among this population, clinical recommendations for use, and knowledge gaps requiring future research. Methods: Studies were identified using MEDLINE, Embase, American Psychological Association PsycINFO, CINAHL Plus with Full Text, Psych Article, Scopus, and Military \& Government Collection. Data were analyzed using descriptive analysis, thematic analysis, and the Randolph Criteria. Narrative synthesis and the PRISMA-ScR (Preferred Reporting Items for Systematic Reviews and Meta-analyses extension for Scoping Reviews) guided the reporting of findings. The psychometric properties of NCATs were evaluated with specific criteria and summarized. Results: Of the 104 papers, 33 met the inclusion criteria for this scoping review. Thematic analysis and NCAT psychometrics were reported and summarized. 
Conclusions: When considering the psychometric properties of the most commonly used NCATs in military populations, these assessments have yet to demonstrate adequate validity, reliability, sensitivity, and clinical utility among military personnel with mTBIs. Additional research is needed to further validate NCATs within military populations, especially for those living outside of the United States and individuals experiencing other conditions known to adversely affect cognitive processing. Knowledge gaps remain, warranting further study of psychometric properties and the utility of baseline and normative testing for NCATs. ", doi="10.2196/26360", url="https://mental.jmir.org/2021/2/e26360", url="http://www.ncbi.nlm.nih.gov/pubmed/33616538" } @Article{info:doi/10.2196/24457, author="Mustafa, Norashikin and Safii, Shanita Nik and Jaffar, Aida and Sani, Samsiah Nor and Mohamad, Izham Mohd and Abd Rahman, Hadi Abdul and Mohd Sidik, Sherina", title="Malay Version of the mHealth App Usability Questionnaire (M-MAUQ): Translation, Adaptation, and Validation Study", journal="JMIR Mhealth Uhealth", year="2021", month="Feb", day="4", volume="9", number="2", pages="e24457", keywords="mHealth app", keywords="questionnaire validation", keywords="questionnaire translation", keywords="Malay MAUQ", keywords="usability", keywords="mHealth", keywords="education", keywords="Malay language", keywords="Malay", keywords="questionnaire", keywords="mobile phone", abstract="Background: Mobile health (mHealth) apps play an important role in delivering education, providing advice on treatment, and monitoring patients' health. Good usability of mHealth apps is essential to achieve the objectives of mHealth apps efficiently. To date, there are questionnaires available to assess the general system usability but not explicitly tailored to precisely assess the usability of mHealth apps. 
Hence, the mHealth App Usability Questionnaire (MAUQ) was developed with 4 versions according to the type of app (interactive or standalone) and according to the target user (patient or provider). Standalone MAUQ for patients comprises 3 subscales, which are ease of use, interface and satisfaction, and usefulness. Objective: This study aimed to translate and validate the English version of MAUQ (standalone for patients) into a Malay version of MAUQ (M-MAUQ) for mHealth app research and usage in future in Malaysia. Methods: Forward and backward translation and harmonization of M-MAUQ were conducted by Malay native speakers who also spoke English as their second language. The process began with a forward translation by 2 independent translators followed by harmonization to produce an initial translated version of M-MAUQ. Next, the forward translation was continued by another 2 translators who had never seen the original MAUQ. Lastly, harmonization was conducted among the committee members to resolve any ambiguity and inconsistency in the words and sentences of the items derived with the prefinal adapted questionnaire. Subsequently, content and face validations were performed with 10 experts and 10 target users, respectively. Modified kappa statistic was used to determine the interrater agreement among the raters. The reliability of the M-MAUQ was assessed by 51 healthy young adult mobile phone users. Participants needed to install the MyFitnessPal app and use it for 2 days for familiarization before completing the designated task and answer the M-MAUQ. The MyFitnessPal app was selected because it is one among the most popular installed mHealth apps globally available for iPhone and Android users and represents a standalone mHealth app. Results: The content validity index for the relevancy and clarity of M-MAUQ were determined to be 0.983 and 0.944, respectively, which indicated good relevancy and clarity. 
The face validity index for understandability was 0.961, which indicated that users understood the M-MAUQ. The kappa statistic for every item in M-MAUQ indicated excellent agreement between the raters ($\kappa$ ranging from 0.76 to 1.09). The Cronbach $\alpha$ for 18 items was .946, which also indicated good reliability in assessing the usability of the mHealth app. Conclusions: The M-MAUQ fulfilled the validation criteria as it revealed good reliability and validity similar to the original version. M-MAUQ can be used to assess the usability of mHealth apps in Malay in the future. ", doi="10.2196/24457", url="http://mhealth.jmir.org/2021/2/e24457/", url="http://www.ncbi.nlm.nih.gov/pubmed/33538704" } @Article{info:doi/10.2196/21161, author="Sevilla-Gonzalez, Rocio Magdalena Del and Moreno Loaeza, Lizbeth and Lazaro-Carrera, Sofia Laura and Bourguet Ramirez, Brigette and V{\'a}zquez Rodr{\'i}guez, Anabel and Peralta-Pedrero, Luisa Mar{\'i}a and Almeda-Valdes, Paloma", title="Spanish Version of the System Usability Scale for the Assessment of Electronic Tools: Development and Validation", journal="JMIR Hum Factors", year="2020", month="Dec", day="16", volume="7", number="4", pages="e21161", keywords="mHealth", keywords="usability", keywords="validation", keywords="System Usability Scale", keywords="Spanish", abstract="Background: The System Usability Scale (SUS) is a common metric used to assess the usability of a system, and it was initially developed in English. The implementation of electronic systems for clinical counseling (eHealth and mobile health) is increasing worldwide. Therefore, tools are needed to evaluate these applications in the languages and regional contexts in which the electronic tools are developed. Objective: This study aims to translate, culturally adapt, and validate the original English version of the SUS into a Spanish version. Methods: The translation process included forward and backward translation. 
Forward translations were made by 2 native Spanish speakers who spoke English as their second language, and a backward translation was made by a native English speaker. The Spanish SUS questionnaire was validated by 10 experts in mobile app development. The face validity of the questionnaire was tested with 10 mobile phone users, and the reliability testing was conducted among 88 electronic application users. Results: The content validity index of the new Spanish SUS was good, as indicated by a rating of 0.92 for the relevance of the items. The questionnaire was easy to understand, based on a face validity index of 0.94. The Cronbach $\alpha$ was .812 (95\% CI 0.748-0.866; P<.001). Conclusions: The new Spanish SUS questionnaire is a valid and reliable tool to assess the usability of electronic tools among Spanish-speaking users. ", doi="10.2196/21161", url="http://humanfactors.jmir.org/2020/4/e21161/", url="http://www.ncbi.nlm.nih.gov/pubmed/33325828" } @Article{info:doi/10.2196/16316, author="W{\aa}ngdahl, Josefin and Jaensson, Maria and Dahlberg, Karuna and Nilsson, Ulrica", title="The Swedish Version of the Electronic Health Literacy Scale: Prospective Psychometric Evaluation Study Including Thresholds Levels", journal="JMIR Mhealth Uhealth", year="2020", month="Feb", day="24", volume="8", number="2", pages="e16316", keywords="eHealth", keywords="literacy", keywords="internet", keywords="psychometrics", abstract="Background: To enhance the efficacy of information and communication, health care has increasingly turned to digitalization. Electronic health (eHealth) is an important factor that influences the use and receipt of benefits from Web-based health resources. Consequently, the concept of eHealth literacy has emerged, and in 2006 Norman and Skinner developed an 8-item self-report instrument to measure these skills: the eHealth Literacy Scale (eHEALS). 
However, the eHEALS has not been tested for reliability and validity in the general Swedish population and no threshold values have been established. Objective: The aim of this study was to translate and adapt eHEALS into a Swedish version; evaluate convergent validity and psychometric properties; and determine threshold levels for inadequate, problematic, and sufficient eHealth literacy. Methods: Prospective psychometric evaluation study included 323 participants equally distributed between sexes with a mean age of 49 years recruited from 12 different arenas. Results: There were some difficulties translating the English concept health resources. This resulted in this concept being translated as health information (ie, H{\"a}lsoinformation in Swedish). The eHEALS total score was 29.3 (SD 6.2), Cronbach alpha .94, Spearman-Brown coefficient .96, and response rate 94.6\%. All a priori hypotheses were confirmed, supporting convergent validity. The test-retest reliability indicated an almost perfect agreement, .86 (P<.001). An exploratory factor analysis found one component explaining 64\% of the total variance. No floor or ceiling effect was noted. Thresholds levels were set at 8 to 20 = inadequate, 21 to 26 = problematic, and 27 to 40 = sufficient, and there were no significant differences in distribution of the three levels between the Swedish version of eHEALS and the HLS-EU-Q16. Conclusions: The Swedish version of eHEALS was assessed as being unidimensional with high internal consistency of the instrument, making the reliability adequate. Adapted threshold levels for inadequate, problematic, and sufficient levels of eHealth literacy seem to be relevant. However, there are some linguistic issues relating to the concept of health resources. ", doi="10.2196/16316", url="https://mhealth.jmir.org/2020/2/e16316", url="http://www.ncbi.nlm.nih.gov/pubmed/32130168" } @Article{info:doi/10.2196/14829, author="Silva, G. 
Anabela and Sim{\~o}es, Patr{\'i}cia and Santos, Rita and Queir{\'o}s, Alexandra and Rocha, P. Nelson and Rodrigues, M{\'a}rio", title="A Scale to Assess the Methodological Quality of Studies Assessing Usability of Electronic Health Products and Services: Delphi Study Followed by Validity and Reliability Testing", journal="J Med Internet Res", year="2019", month="Nov", day="15", volume="21", number="11", pages="e14829", keywords="quality of health care", keywords="eHealth", keywords="mHealth", keywords="efficiency", abstract="Background: The usability of electronic health (eHealth) and mobile health apps is of paramount importance as it impacts the quality of care. Methodological quality assessment is a common practice in the field of health for different designs and types of studies. However, we were unable to find a scale to assess the methodological quality of studies on the usability of eHealth products or services. Objective: This study aimed to develop a scale to assess the methodological quality of studies assessing usability of mobile apps and to perform a preliminary analysis of the scale's feasibility, reliability, and construct validity on studies assessing usability of mobile apps, measuring aspects of physical activity. Methods: A 3-round Delphi panel was used to generate a pool of items considered important when assessing the quality of studies on the usability of mobile apps. These items were used to write the scale and the guide to assist its use. The scale was then used to assess the quality of studies on usability of mobile apps for physical activity, and it was assessed in terms of feasibility, interrater reliability, and construct validity. Results: A total of 25 experts participated in the Delphi panel, and a 15-item scale was developed. 
This scale was shown to be feasible (time of application mean 13.10 [SD 2.59] min), reliable (intraclass correlation coefficient=0.81; 95\% CI 0.55-0.93), and able to discriminate between low- and high-quality studies (high quality: mean 9.22 [SD 0.36]; low quality: mean 6.86 [SD 0.80]; P=.01). Conclusions: The scale that was developed can be used both to assess the methodological quality of usability studies and to inform its planning. ", doi="10.2196/14829", url="http://www.jmir.org/2019/11/e14829/", url="http://www.ncbi.nlm.nih.gov/pubmed/31730036" } @Article{info:doi/10.2196/10308, author="Mohamad Marzuki, Fadhil Muhamad and Yaacob, Azwany Nor and Yaacob, Majdi Najib", title="Translation, Cross-Cultural Adaptation, and Validation of the Malay Version of the System Usability Scale Questionnaire for the Assessment of Mobile Apps", journal="JMIR Hum Factors", year="2018", month="May", day="14", volume="5", number="2", pages="e10308", keywords="usability", keywords="System Usability Scale", keywords="Malay", keywords="questionnaire translation", keywords="questionnaire validation", keywords="mobile app", abstract="Background: A mobile app is a programmed system designed to be used by a target user on a mobile device. The usability of such a system refers not only to the extent to which product can be used to achieve the task that it was designed for, but also its effectiveness and efficiency, as well as user satisfaction. The System Usability Scale is one of the most commonly used questionnaires used to assess the usability of a system. The original 10-item version of System Usability Scale was developed in English and thus needs to be adapted into local languages to assess the usability of a mobile apps developed in other languages. Objective: The aim of this study is to translate and validate (with cross-cultural adaptation) the English System Usability Scale questionnaire into Malay, the main language spoken in Malaysia. 
The development of a translated version will allow the usability of mobile apps to be assessed in Malay. Methods: Forward and backward translation of the questionnaire was conducted by groups of Malay native speakers who spoke English as their second language. The final version was obtained after reconciliation and cross-cultural adaptation. The content of the Malay System Usability Scale questionnaire for mobile apps was validated by 10 experts in mobile app development. The efficacy of the questionnaire was further probed by testing the face validity on 10 mobile phone users, followed by reliability testing involving 54 mobile phone users. Results: The content validity index was determined to be 0.91, indicating good relevancy of the 10 items used to assess the usability of a mobile app. Calculation of the face validity index resulted in a value of 0.94, therefore indicating that the questionnaire was easily understood by the users. Reliability testing showed a Cronbach alpha value of .85 (95\% CI 0.79-0.91) indicating that the translated System Usability Scale questionnaire is a reliable tool for the assessment of usability of a mobile app. Conclusions: The Malay System Usability Scale questionnaire is a valid and reliable tool to assess the usability of mobile app in Malaysia. 
", doi="10.2196/10308", url="http://humanfactors.jmir.org/2018/2/e10308/", url="http://www.ncbi.nlm.nih.gov/pubmed/29759955" } @Article{info:doi/10.2196/jmir.8347, author="Karnoe, Astrid and Furstrand, Dorthe and Christensen, Bang Karl and Norgaard, Ole and Kayser, Lars", title="Assessing Competencies Needed to Engage With Digital Health Services: Development of the eHealth Literacy Assessment Toolkit", journal="J Med Internet Res", year="2018", month="May", day="10", volume="20", number="5", pages="e178", keywords="health literacy", keywords="computer literacy", keywords="questionnaires", keywords="telemedicine", keywords="consumer health informatics", abstract="Background: To achieve full potential in user-oriented eHealth projects, we need to ensure a match between the eHealth technology and the user's eHealth literacy, described as knowledge and skills. However, there is a lack of multifaceted eHealth literacy assessment tools suitable for screening purposes. Objective: The objective of our study was to develop and validate an eHealth literacy assessment toolkit (eHLA) that assesses individuals' health literacy and digital literacy using a mix of existing and newly developed scales. Methods: From 2011 to 2015, scales were continuously tested and developed in an iterative process, which led to 7 tools being included in the validation study. The eHLA validation version consisted of 4 health-related tools (tool 1: ``functional health literacy,'' tool 2: ``health literacy self-assessment,'' tool 3: ``familiarity with health and health care,'' and tool 4: ``knowledge of health and disease'') and 3 digitally-related tools (tool 5: ``technology familiarity,'' tool 6: ``technology confidence,'' and tool 7: ``incentives for engaging with technology'') that were tested in 475 respondents from a general population sample and an outpatient clinic. 
Statistical analyses examined floor and ceiling effects, interitem correlations, item-total correlations, and Cronbach coefficient alpha (CCA). Rasch models (RM) examined the fit of data. Tools were reduced in items to secure robust tools fit for screening purposes. Reductions were made based on psychometrics, face validity, and content validity. Results: Tool 1 was not reduced in items; it consequently consists of 10 items. The overall fit to the RM was acceptable (Anderson conditional likelihood ratio, CLR=10.8; df=9; P=.29), and CCA was .67. Tool 2 was reduced from 20 to 9 items. The overall fit to a log-linear RM was acceptable (Anderson CLR=78.4, df=45, P=.002), and CCA was .85. Tool 3 was reduced from 23 to 5 items. The final version showed excellent fit to a log-linear RM (Anderson CLR=47.7, df=40, P=.19), and CCA was .90. Tool 4 was reduced from 12 to 6 items. The fit to a log-linear RM was acceptable (Anderson CLR=42.1, df=18, P=.001), and CCA was .59. Tool 5 was reduced from 20 to 6 items. The fit to the RM was acceptable (Anderson CLR=30.3, df=17, P=.02), and CCA was .94. Tool 6 was reduced from 5 to 4 items. The fit to a log-linear RM taking local dependency (LD) into account was acceptable (Anderson CLR=26.1, df=21, P=.20), and CCA was .91. Tool 7 was reduced from 6 to 4 items. The fit to a log-linear RM taking LD and differential item functioning into account was acceptable (Anderson CLR=23.0, df=29, P=.78), and CCA was .90. Conclusions: The eHLA consists of 7 short, robust scales that assess individual's knowledge and skills related to digital literacy and health literacy. 
", doi="10.2196/jmir.8347", url="http://www.jmir.org/2018/5/e178/", url="http://www.ncbi.nlm.nih.gov/pubmed/29748163" } @Article{info:doi/10.2196/jmir.8759, author="Chung, SeonYoon and Park, Kyung Bu and Nahm, Eun-Shim", title="The Korean eHealth Literacy Scale (K-eHEALS): Reliability and Validity Testing in Younger Adults Recruited Online", journal="J Med Internet Res", year="2018", month="Apr", day="20", volume="20", number="4", pages="e138", keywords="eHEALS", keywords="eHealth", keywords="literacy", keywords="reliability", keywords="validity", abstract="Background: In this digital era, eHealth literacy is an essential skill set to leverage health information available online to promote health outcomes. South Korea has an advanced health information technology infrastructure, including widespread use of the internet and mobile phones. A few studies have explored eHealth literacy in South Korea using translated versions of the eHEALS; however, they were not fully validated. A unified reliable and valid assessment tool is critical to assess and enhance the eHealth literacy level across the population. Objective: The aim was to develop a Korean version of eHealth Literacy Scale (K-eHEALS) and evaluate its reliability and validity employing healthy young adults in Korea. Methods: The K-eHEALS was developed based on eHEALS, a widely used tool that measures eHealth literacy, and was validated using a sample of 500 young adults recruited from a pool of a Korean internet survey panel. Content validity was assessed using the content validity index (CVI) for individual items and for scale. Construct validity was examined using exploratory factor analysis and hypothesis testing. The Cronbach alpha coefficient was used to determine the internal consistency and the Pearson correlation coefficient was used to evaluate the stability of the measure (n=55). Results: Both individual and scale CVIs were acceptable (individual CVIs>0.67; scale CVI=0.83). 
Single factors accounting for 50.3\% of the variance in the scales were extracted revealing the unidimensional latent structure of K-eHEALS. Hypothesis testing showed significant association between eHealth literacy and hours of internet use per day, supporting the construct validity. Items of the K-eHEALS were internally consistent (Cronbach alpha=.88) and stable over a 1-month period (r=.754, P<.001). Conclusions: The findings of this study suggest that K-eHEALS is a valid and reliable measure of eHealth literacy in Korean young adults. Additional studies are needed with more diverse groups of adults in Korea. ", doi="10.2196/jmir.8759", url="http://www.jmir.org/2018/4/e138/", url="http://www.ncbi.nlm.nih.gov/pubmed/29678800" } @Article{info:doi/10.2196/humanfactors.9039, author="Hyde, L. Lisa and Boyes, W. Allison and Evans, Tiffany-Jane and Mackenzie, J. Lisa and Sanson-Fisher, Rob", title="Three-Factor Structure of the eHealth Literacy Scale Among Magnetic Resonance Imaging and Computed Tomography Outpatients: A Confirmatory Factor Analysis", journal="JMIR Hum Factors", year="2018", month="Feb", day="19", volume="5", number="1", pages="e6", keywords="eHealth", keywords="literacy", keywords="factor analysis", keywords="measures", keywords="psychometrics", abstract="Background: Electronic health (eHealth) literacy is needed to effectively engage with Web-based health resources. The 8-item eHealth literacy scale (eHEALS) is a commonly used self-report measure of eHealth literacy. Accumulated evidence has suggested that the eHEALS is unidimensional. However, a recent study by Sudbury-Riley and colleagues suggested that a theoretically-informed three-factor model fit better than a one-factor model. The 3 factors identified were awareness (2 items), skills (3 items), and evaluate (3 items). It is important to determine whether these findings can be replicated in other populations. 
Objective: The aim of this cross-sectional study was to verify the three-factor eHEALS structure among magnetic resonance imaging (MRI) and computed tomography (CT) medical imaging outpatients. Methods: MRI and CT outpatients were recruited consecutively in the waiting room of one major public hospital. Participants self-completed a touchscreen computer survey, assessing their sociodemographic, scan, and internet use characteristics. The eHEALS was administered to internet users, and the three-factor structure was tested using structural equation modeling. Results: Of 405 invited patients, 87.4\% (354/405) were interested in participating in the study, and of these, 75.7\% (268/354) were eligible. Of the eligible participants, 95.5\% (256/268) completed all eHEALS items. Factor loadings were 0.80 to 0.94 and statistically significant (P<.001). All reliability measures were acceptable (indicator reliability: awareness=.71-.89, skills=.78-.80, evaluate=.64-.79; composite reliability: awareness=.89, skills=.92, evaluate=.89; variance extracted estimates: awareness=.80, skills=.79, evaluate=.72). Two out of three goodness-of-fit indices were adequate (standardized root mean square residual (SRMR)=.038; comparative fit index (CFI)=.944; root mean square error of approximation (RMSEA)=.156). Item 3 was removed because of its significant correlation with item 2 (Lagrange multiplier [LM] estimate 104.02; P<.001) and high loading on 2 factors (LM estimate 91.11; P<.001). All 3 indices of the resulting 7-item model indicated goodness of fit ($\chi^2_{11}$=11.3; SRMR=.013; CFI=.999; RMSEA=.011). Conclusions: The three-factor eHEALS structure was supported in this sample of MRI and CT medical imaging outpatients. Although further factorial validation studies are needed, these 3 scale factors may be used to identify individuals who could benefit from interventions to improve eHealth literacy awareness, skill, and evaluation competencies. 
", doi="10.2196/humanfactors.9039", url="http://humanfactors.jmir.org/2018/1/e6/", url="http://www.ncbi.nlm.nih.gov/pubmed/29459356" }