<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR Human Factors</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Hum Factors</journal-id>
      <journal-title>JMIR Human Factors</journal-title>
      <issn pub-type="epub">2292-9495</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v11i1e48633</article-id>
      <article-id pub-id-type="pmid">39207831</article-id>
      <article-id pub-id-type="doi">10.2196/48633</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Barriers to and Facilitators of Artificial Intelligence Adoption in Health Care: Scoping Review</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Jacob</surname>
            <given-names>Christine</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>McMurray</surname>
            <given-names>Josephine</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Soares-Pinto</surname>
            <given-names>Igor</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Zhuang</surname>
            <given-names>Yan</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Wright</surname>
            <given-names>Marcia</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Weinert</surname>
            <given-names>Lina</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Hassan</surname>
            <given-names>Masooma</given-names>
          </name>
          <degrees>BCom, MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Health Information Science</institution>
            <institution>University of Victoria</institution>
            <addr-line>HSD Building, A202</addr-line>
            <addr-line>Victoria, BC, V8W 2Y2</addr-line>
            <country>Canada</country>
            <phone>1 6472876274</phone>
            <email>masooma.d.hassan@gmail.com</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6185-5051</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Kushniruk</surname>
            <given-names>Andre</given-names>
          </name>
          <degrees>BSc, BA, MSc, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2557-9288</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Borycki</surname>
            <given-names>Elizabeth</given-names>
          </name>
          <degrees>RN, HBScN, MN, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0928-8867</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Health Information Science</institution>
        <institution>University of Victoria</institution>
        <addr-line>Victoria, BC</addr-line>
        <country>Canada</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Masooma Hassan <email>masooma.d.hassan@gmail.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>29</day>
        <month>8</month>
        <year>2024</year>
      </pub-date>
      <volume>11</volume>
      <elocation-id>e48633</elocation-id>
      <history>
        <date date-type="received">
          <day>1</day>
          <month>5</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>3</day>
          <month>6</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>28</day>
          <month>2</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>12</day>
          <month>6</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Masooma Hassan, Andre Kushniruk, Elizabeth Borycki. Originally published in JMIR Human Factors (https://humanfactors.jmir.org), 29.08.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Human Factors, is properly cited. The complete bibliographic information, a link to the original publication on https://humanfactors.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://humanfactors.jmir.org/2024/1/e48633" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Artificial intelligence (AI) use cases in health care are on the rise, with the potential to improve operational efficiency and care outcomes. However, the translation of AI into practical, everyday use has been limited, as its effectiveness relies on successful implementation and adoption by clinicians, patients, and other health care stakeholders.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>As adoption is a key factor in the successful proliferation of an innovation, this scoping review aimed at presenting an overview of the barriers to and facilitators of AI adoption in health care.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A scoping review was conducted using the guidance provided by the Joanna Briggs Institute and the framework proposed by Arksey and O’Malley. MEDLINE, IEEE Xplore, and ScienceDirect databases were searched to identify publications in English that reported on the barriers to or facilitators of AI adoption in health care. This review focused on articles published between January 2011 and December 2023. The review did not have any limitations regarding the health care setting (hospital or community) or the population (patients, clinicians, physicians, or health care administrators). A thematic analysis was conducted on the selected articles to map factors associated with the barriers to and facilitators of AI adoption in health care.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>A total of 2514 articles were identified in the initial search. After title and abstract reviews, 50 (1.99%) articles were included in the final analysis. These articles were reviewed for the barriers to and facilitators of AI adoption in health care. Most articles were empirical studies, literature reviews, reports, and thought articles. Approximately 18 categories of barriers and facilitators were identified. These were organized sequentially to provide considerations for AI development, implementation, and the overall structure needed to facilitate adoption.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The literature review revealed that trust is a significant catalyst of adoption, and it was found to be impacted by several barriers identified in this review. A governance structure can be a key facilitator, among others, in ensuring all the elements identified as barriers are addressed appropriately. The findings demonstrate that the implementation of AI in health care is still, in many ways, dependent on the establishment of regulatory and legal frameworks. Further research into a combination of governance and implementation frameworks, models, or theories to enhance trust that would specifically enable adoption is needed to provide the necessary guidance to those translating AI research into practice. Future research could also be expanded to include attempts at understanding patients’ perspectives on complex, high-risk AI use cases and how the use of AI applications affects clinical practice and patient care, including sociotechnical considerations, as more algorithms are implemented in actual clinical environments.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>governance</kwd>
        <kwd>health information systems</kwd>
        <kwd>artificial intelligence adoption</kwd>
        <kwd>system implementation</kwd>
        <kwd>health care organizations</kwd>
        <kwd>health services</kwd>
        <kwd>mobile phone</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>The onset of the 2020 COVID-19 pandemic has particularly triggered health care organizations across the globe to consider transforming their health delivery models. According to the 2024 Global Health Care Sector Outlook report published by Deloitte, hospitals and health care organizations are addressing challenges by turning toward novel technologies such as cloud computing, artificial intelligence (AI), 5G telecommunications, and interoperable data and analytics to enable care via digital models [<xref ref-type="bibr" rid="ref1">1</xref>]. It was not too long ago, in 2017, when the Canadian government created the Pan-Canadian Artificial Intelligence Strategy and announced an investment plan of CAD $125 million (US $96 million) for research in AI. The Canadian Institute for Advanced Research was mandated to lead this strategy forward with a 5-year plan to enhance Canada’s AI innovation profile on the international stage. Health care is one of the 4 sectors on which the Canadian Institute for Advanced Research is focusing for the advancement of AI research [<xref ref-type="bibr" rid="ref2">2</xref>]. However, health care has seen slow success in the implementation of AI use cases.</p>
      </sec>
      <sec>
        <title>Objective</title>
        <p>The objective of this review was to investigate what is known from existing literature about the barriers to and facilitators of AI adoption in health care and propose recommendations on approaches that would address barriers to adoption.</p>
        <p>We begin this paper by (1) defining AI (before providing some context for AI’s use in health care), (2) describing the most prominent applications of AI in health care, (3) outlining the value that AI is expected to provide, and (4) providing a rationale for this review.</p>
      </sec>
      <sec>
        <title>History and Definitions</title>
        <p>AI is not necessarily a new concept; rather, the exploration of this innovation goes as far back as 10th century China, when mechanical engineer Yan Shi presented to Emperor Zhou mechanical men capable of independently moving their bodies. In the 12th century, al Jazari, who was a polymath, an inventor, and a mechanical engineer, developed humanoid robots. Furthermore, in the 15th century European Renaissance, Leonardo da Vinci similarly developed a knight robot that was able to move different parts of its body on its own. The definition of AI has changed over time, from referring to robotic machines to much more sophisticated technologies capable of mimicking human decision-making processes and behaviors. The advancement of computer systems and languages in the more recent decades has made it possible to progress toward AI systems. The definition that most fits today’s application of AI and is referenced in this paper was coined by John McCarthy in 1956. McCarthy defined AI as “the science and engineering of making intelligent machines” [<xref ref-type="bibr" rid="ref3">3</xref>]. It is unclear what definition is consistently used; however, what is clear is that today’s AI encompasses various techniques aimed at mimicking humanlike intelligence and behavior to allow for the emergence of intelligent technologies capable of problem-solving and decision-making. In this way, AI should be skilled at processing large amounts of information, should arrive at a conclusion through reasoning, and have the ability to learn and solve problems on its own [<xref ref-type="bibr" rid="ref4">4</xref>]. Various analytic techniques are used to allow for this, with the most prominent ones falling under machine learning (ML) and natural language processing (NLP) [<xref ref-type="bibr" rid="ref5">5</xref>].</p>
        <p>Large data sets are needed to develop effective AI algorithms and enable AI’s maturity to arrive at intelligent outputs. In health care, the sources of data for NLP are primarily unstructured data, for example, free-text clinical notes from electronic medical records (EMRs). ML techniques use structured data such as diagnostic images and genomic data. ML uses two primary types of algorithms: (1) supervised and (2) unsupervised. Supervised learning provides more clinically relevant results; hence, AI applications mostly use supervised learning. There are several techniques in supervised learning, with neural networks and support vector machines being the most popular of the techniques [<xref ref-type="bibr" rid="ref5">5</xref>]. The most modern extension of the neural network technique is called deep learning (DL). DL has been made possible due to the increasing availability of large amounts of complex data. This technique has become more popular because of the number of layers of data it can translate. NLP can be used to convert unstructured data into structured data. Therefore, both NLP and ML, along with additional data, are required to train the AI continuously. The more data that are fed into the AI, the “smarter” it becomes. In health care, data sets can be available from various sources, such as electronic health records (EHRs), laboratory tests, diagnostic imaging, electrodiagnosis, genetic diagnosis, and mass screening [<xref ref-type="bibr" rid="ref5">5</xref>]. In 2022, the release of ChatGPT (OpenAI) brought to light the power of large language models. This type of chatbot-style generative AI is being considered to enable extracting data from EMRs and converting them into meaningful outputs that can be useful for clinicians by lowering their administrative burden [<xref ref-type="bibr" rid="ref6">6</xref>].</p>
      </sec>
      <sec>
        <title>Current State of AI Research and Health Care Use Cases</title>
        <p>Research in AI has been exponentially increasing, with bibliometric reporting of published articles on the topic of health care having increased at an annual growth rate of 5.12% over the past 28 years. As of 2021, the most significant increases in bibliometric reporting took place in the 3 years before 2021 [<xref ref-type="bibr" rid="ref7">7</xref>]. According to Tran et al [<xref ref-type="bibr" rid="ref8">8</xref>], the disciplines with the highest number of publications at the intersection of AI and health include cancer, heart diseases and stroke, ophthalmology, Alzheimer disease, and depression. Most publications on the types of AI used reported on robotics, ML, and DL.</p>
        <p>In health care, publications of AI applications are concentrated around operational or administrative efficiency as well as patient care improvement, including better outcomes through improved diagnosis and treatment [<xref ref-type="bibr" rid="ref9">9</xref>]. AI enhances operational and administrative efficiency by providing administrative support to health professionals and improving performance across the organization. AI can achieve this through, for example, its ability to consolidate and provide the latest and most validated research findings that can support clinicians with up-to-date evidence-based decision-making while providing care and its ability to leverage EHR data to predict data heterogeneity between various hospitals and clinics [<xref ref-type="bibr" rid="ref7">7</xref>]. Emergency departments are largely found to have successfully applied AI to optimize resource planning and crowd management [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]; for example, the Hospital for Sick Children and Humber River Hospital in Ontario, Canada, are using AI to improve emergency department operations by predicting patient surges in the emergency waiting room [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>].</p>
        <p>Use cases aimed at improving care outcomes include predictive analytics around disease outcome prediction or prognosis evaluation and clinical decision support systems [<xref ref-type="bibr" rid="ref7">7</xref>]. Examples of such cases are found in cardiology and include the early detection of atrial fibrillation via a smartphone-based electrocardiogram or cardiovascular risk assessment via patient records. Other promising areas include neurology, specifically stroke prediction and diagnosis [<xref ref-type="bibr" rid="ref5">5</xref>]. Gastroenterology AI applications have also been successfully tested, where algorithms are used to predict outcomes in cases of esophageal cancer and metastasis in colorectal cancer [<xref ref-type="bibr" rid="ref14">14</xref>]. Image-based diagnosis is considered the most successful use of AI applications in health care, largely supporting radiology, dermatology, ophthalmology, and pathology [<xref ref-type="bibr" rid="ref15">15</xref>]. In a review conducted on the literature on AI use in the emergency department, Kirubarajan et al [<xref ref-type="bibr" rid="ref11">11</xref>] reported that 50% of the studies found that AI interventions were better able to diagnose various ailments, such as acute cardiac events and hyperkalemia, among other health conditions.</p>
        <p>As mentioned earlier, AI requires large amounts of data to learn and apply sophisticated reasoning and accurate problem-solving. In addition to the race toward researching AI use cases, a surge in health care data is further setting the stage to allow for accelerated AI innovations [<xref ref-type="bibr" rid="ref16">16</xref>]. EMR data; wearable sensor technology; and genomic, pharmaceutical, and research databases offer opportunities to apply AI to the analysis of health data. Approximately 30% of the world’s data volume is generated by the health care industry. The compound annual growth rate of data for health care is expected to reach 36% by 2025 [<xref ref-type="bibr" rid="ref17">17</xref>]. This growth of data volume in health care is faster than that in manufacturing, financial services, and media and entertainment industries [<xref ref-type="bibr" rid="ref18">18</xref>]. This sets the stage well for developing AI technologies that can be integrated into health care practice, as algorithms now have more data to provide increasingly sophisticated outputs.</p>
      </sec>
      <sec>
        <title>Contributions of the Research</title>
        <p>It is clear that the changing landscape, increasing evidence on AI use cases, and increasing availability of data in health care are setting the path toward realizing real-life applications of AI. However, successful utility requires successful adoption, and a number of studies have reported on the challenges encountered with implementing AI in health care [<xref ref-type="bibr" rid="ref19">19</xref>-<xref ref-type="bibr" rid="ref21">21</xref>]. Health care organizations are especially complex and can be resistant to change due to various reasons associated with legacy structures, a shortage of resources, and high demand. An estimated 70% of health IT projects fail [<xref ref-type="bibr" rid="ref22">22</xref>], and an important characteristic of successful technological implementation is tied to its adoption, which is why adoption is a key component of frameworks such as the unified theory of acceptance and use of technology and nonadoption, abandonment, scale-up, spread, and sustainability theory. These frameworks are used to evaluate and study the acceptance of technologies. For example, the unified theory of acceptance and use of technology framework, which integrates all the available theories about technology adoption, suggests several factors that help understand users’ intention to adopt and use a technology. It looks at all the available theories about technology adoption to evaluate use of information systems [<xref ref-type="bibr" rid="ref23">23</xref>]. Similarly, the nonadoption, abandonment, scale-up, spread, and sustainability framework has incorporated multiple theories to help study factors influencing “non-adoption, abandonment and challenges to scale-up, spread and sustainability of technology-supported change efforts” [<xref ref-type="bibr" rid="ref24">24</xref>]. Both emphasize the importance of studying adoption to support the successful uptake of technologies beyond implementation. 
With these reasons in mind, it is important that organizations understand the barriers to and facilitators of AI adoption to ensure successful AI implementation. In reference to the widely known work of Everett Rogers, famously known as the Rogers diffusion of innovation theory, Cresswell and Sheikh [<xref ref-type="bibr" rid="ref25">25</xref>] have defined implementation as “the consideration and the introduction of HIT applications,” whereas adoption is defined as “the acceptance and incorporation of HIT applications into everyday practice.”</p>
        <p>An initial search was performed to identify whether any consolidated reviews, such as scoping reviews, were already conducted to understand the barriers to and facilitators of AI adoption in health care. During this search, it was found that a majority of the literature seemed to report on a specific area, such as radiology, in a specific setting (hospital or community), and a number of studies were reporting on implementation findings and not necessarily adoption. A few literature reviews on the determinants of and barriers to AI adoption have been conducted, such as the review by Radhakrishnan and Chattopadhyay [<xref ref-type="bibr" rid="ref26">26</xref>]. However, these reviews span across multiple industries. For health care, 1 systematic review on the <italic>barriers</italic> to AI adoption in health care has been conducted by Assadullah [<xref ref-type="bibr" rid="ref27">27</xref>]. However, there are no consolidated reviews that consider both the <italic>barriers to and facilitators</italic> of AI adoption in health care at large. Therefore, this review has attempted to explore the latter to provide considerations for health care organizations looking to successfully implement AI technologies via increased adoption.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Overview</title>
        <p>This review was guided by the methodology and reporting structure outlined for scoping reviews by the Joanna Briggs Institute as well as Arksey and O’Malley [<xref ref-type="bibr" rid="ref28">28</xref>]. The stages defined by Arksey and O’Malley [<xref ref-type="bibr" rid="ref28">28</xref>] were followed to conduct this scoping review: (1) identifying the research question; (2) identifying relevant studies; (3) selecting studies for inclusion; (4) charting the data; and (5) collating, summarizing, and reporting the results.</p>
      </sec>
      <sec>
        <title>Stage 1: Identifying the Research Question</title>
        <p>Because adoption is a key element of successful cost-benefit realization of technological investments, a general question was formed using the “population, concept, and context” approach [<xref ref-type="bibr" rid="ref29">29</xref>]. The first component, “population,” included users or potential users of the AI system, such as patients, providers, health care leaders, researchers, and those who were involved with implementing AI systems in various settings. The second component, “concept,” consisted of barriers to and facilitators of any AI technology. The third component, “context,” was centered on barriers to and facilitators of any AI technology in <italic>any health care setting,</italic> leaving this as broad as possible to maintain the paradigms of a true scoping review. A generic question developed was as follows: What are the barriers to and facilitators of AI adoption in health care?</p>
      </sec>
      <sec>
        <title>Stages 2 and 3: Identifying Relevant Studies and Study Selection</title>
        <p>In commencing the research, eligibility criteria were defined (as described in the Eligibility section). Once the eligibility criteria were defined, the search strategy was identified, and a search for articles was conducted in the selected databases.</p>
        <sec>
          <title>Eligibility (Inclusion and Exclusion Criteria)</title>
          <p>All published studies and gray literature that reported implementation findings related to adoption or reported factors impacting adoption were considered in this review. Therefore, studies with various designs, including quantitative and qualitative studies, literature reviews, thought articles, conference papers, and reports, were included in the initial search and review. “Health care organizations” were defined as organizations that are engaged in providing care to patients or involved in some aspect of providing agency to health care players. Health care players were defined as anyone involved in the process of providing or receiving care, including policy makers; administrative professionals; clinicians; physicians; and, most importantly, patients and their families. All types of AI technologies were considered in this review. Articles were not excluded based on variations in settings (hospital vs community setting) or countries where the research was conducted. Only articles in English were included. Due to the speed at which the landscape for AI is advancing, only articles that were published between January 2011 and December 2023 (when the search was conducted) were included.</p>
        </sec>
        <sec>
          <title>Search Strategy</title>
          <p>This review is intended to synthesize findings from publications that reported on the barriers to and facilitators of the adoption of AI implementations. A search was conducted on MEDLINE, ScienceDirect, and IEEE Xplore in December 2023. Keywords were selected in reference to the question identified to formulate the scope. Keywords included “artificial intelligence”; “healthcare” or “health care”; “hospital,” “health services,” or “health facilities”; “adoption”; “barriers”; “obstacles”; “challenges”; “facilitators”; and “enablers” (<xref ref-type="boxed-text" rid="box1">Textbox 1</xref>).</p>
          <boxed-text id="box1" position="float">
            <title>Search query.</title>
            <p>
              <bold>Query</bold>
            </p>
            <list list-type="bullet">
              <list-item>
                <p>“artificial intelligence” AND healthcare or health care or hospital or health services or health facilities AND adoption AND barriers or obstacles or challenges or facilitators or enablers</p>
              </list-item>
              <list-item>
                <p>“artificial intelligence” AND health AND adoption AND (Barrier OR Facilitator)</p>
              </list-item>
            </list>
          </boxed-text>
        </sec>
        <sec>
          <title>Study Selection</title>
          <p>A total of 2 reviewers independently screened the articles from the initial search by reviewing their titles and abstracts. Articles meeting the inclusion criteria were identified. Articles that did not meet the inclusion criteria were excluded. Any discrepancies were resolved through discussion.</p>
          <p>Of the articles identified, the full text of the semifinal set of articles was reviewed to further refine selected articles. This process was iterative, and some exclusions were made during the writing phase, as the findings evolved. An Excel spreadsheet (Microsoft Corp) was used to record the articles identified. Recordings included the following details: the name of the article, authors, journal, whether the article was peer reviewed, type of paper, discipline, country, region, method, population, end users, and type of AI application (if specified). Duplicate studies were identified and removed to ensure there was no overlap.</p>
        </sec>
      </sec>
      <sec>
        <title>Stages 4 and 5: Charting the Data and Collating, Summarizing, and Reporting the Results</title>
        <p>A conventional content analysis approach was used to review the articles, chart the data, and identify themes [<xref ref-type="bibr" rid="ref30">30</xref>]. Publications meeting the inclusion criteria were reviewed in detail, and an inductive approach was used to identify themes. First, the articles were read in full for the author to immerse into the content. This was followed by carefully reading each article and highlighting key concepts around barriers and facilitators that appeared to repeat across all the articles. These initial key concepts were recorded as themes, and this process helped identify many themes that were further categorized and grouped based on similarity. All data were charted in an Excel table to help with the analysis.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>This work received an ethics exemption from the University of Victoria ethics board because the research took the form of a literature review.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>The initial search from MEDLINE, IEEE Xplore, and ScienceDirect provided cumulative results of 2514 publications. After screening the results, 483 (19.21%) publications were included for abstract review based on the title of the study. After abstract review, 134 (27.7%) publications were identified for further text review, further excluding 345 (71.4%) publications, including 4 (0.8%) duplicate articles. Out of the 134 studies, 50 (37.3%) went through a thorough and more detailed review and thematic analysis. <xref rid="figure1" ref-type="fig">Figure 1</xref> presents the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flow diagram. The breakdown of studies is based on the country of origin, types of articles selected, and health care discipline or area covered. Overall, 11 articles were from the United States, 7 from China, 5 each from the United Kingdom and Canada, and 3 each from Germany and the Netherlands; the remainder of the articles were from Australia, France, India, Indonesia and Taiwan, Italy, New Zealand, Saudi Arabia, Singapore, Sweden, Switzerland, and other European countries. In some cases, multiple countries or regions collaborated to publish the articles together, including different European countries or the United Kingdom and United States. A total of 13% of the articles were literature reviews and 8% were mixed methods studies. The rest of the articles were cross-sectional studies, ethnographic or qualitative studies, case studies, white papers, and thought articles. In terms of setting, the majority of the articles discussed AI in health care in general, with most of these reporting from the field of radiology or oncology. The settings of the remaining articles were academic hospitals, ophthalmology clinics, hospitals, primary care, and dermatology clinics.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flow diagram showing the study selection process.</p>
          </caption>
          <graphic xlink:href="humanfactors_v11i1e48633_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Thematic Analysis</title>
        <sec>
          <title>Overview</title>
          <p>On the basis of the conventional content analysis approach used (as described in the Methods section), a total of 18 categories of barriers and facilitators were identified (<xref rid="figure2" ref-type="fig">Figure 2</xref>). Interestingly, the themes were found to provide perspective on both facilitators and barriers. For example, if the theme explainability was identified as a barrier, the same theme was tabulated as a facilitator to capture what the articles recommended for overcoming challenges with explainability to increase adoption. As such, the reporting of the results for each theme provided perspective on the theme being both a barrier and facilitator, with the exception of governance, which was entirely noted as a key facilitator.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>Themes identified. AI: artificial intelligence.</p>
            </caption>
            <graphic xlink:href="humanfactors_v11i1e48633_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Transparency and Explainability</title>
          <p>Explainability can be defined as the ability to deconstruct an algorithm to understand the mechanism by which it arrived at the output. Explainability has gained prominence due to the fast-paced growth of ML algorithms such as DL. These algorithms are labeled “black box” due to the difficulty in interpreting and tracing the techniques used by the AI models, thereby impacting trust and demanding the need for transparency [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. According to Holzinger et al [<xref ref-type="bibr" rid="ref33">33</xref>], “explainable AI deals with the implementation of transparency and traceability of statistical black-box machine learning methods, particularly deep learning.” While transparency has a wider definition and explainability is a component of transparency [<xref ref-type="bibr" rid="ref34">34</xref>], most studies have noted explainability in the context of algorithmic transparency; therefore, findings from these 2 interrelated concepts have been discussed together.</p>
          <p>Several studies noted a lack of explainability as a barrier to adoption. Baxter et al [<xref ref-type="bibr" rid="ref35">35</xref>] reported concerns from adopters around the lack of explainability regarding the prediction of the AI algorithm embedded in the EHR to predict unplanned readmission; specifically, the lack of explainability regarding what features of the algorithm were driving the output was an impediment to trust among adopters. Other studies noted that the way the data were being used to train algorithms was not clear. The lack of traceability and logical understanding of how the algorithm arrived at a recommendation contradicted a key foundation of evidence-based medicine, which relies on high standards of explainability. Clinicians expressed the need to understand both the scientific and clinical bases of the recommendations provided by the AI to confidently validate and apply the decision [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref38">38</xref>]. Morrison [<xref ref-type="bibr" rid="ref36">36</xref>] particularly quoted stakeholders seeking clarity on the extent to which there is a need to provide transparency on AI output to patients and how this is directed by legislation or data protection laws. In a study conducted by Nadarzynski et al [<xref ref-type="bibr" rid="ref39">39</xref>], users of an AI-enabled chatbot reported hesitancy to use the technology due to a lack of transparency on how the chatbot accurately arrives at responses to health inquiries.</p>
          <p>According to Holzinger et al [<xref ref-type="bibr" rid="ref33">33</xref>], “explainability is an important element for consideration in order to enhance trust of medical professionals.” To facilitate adoption, improving algorithmic transparency will be a key consideration to change attitudes and build the trust of adopters [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. Additional recommendations around facilitating adoption were to have processes in place to support clinicians in case of disagreements on decisions due to a lack of transparency and explainability [<xref ref-type="bibr" rid="ref41">41</xref>]. Furthermore, revealing the process of how the algorithm was developed, who was involved in the development process, whether clinicians were consulted, and how the data were processed would enable acceptability [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>].</p>
          <p>In health care, causality is especially important when using automated decision systems; therefore, Holzinger et al [<xref ref-type="bibr" rid="ref33">33</xref>] emphasized that AI systems should support the understanding and explanation of the causal models as opposed to simply solving through pattern recognition. Similarly, Gillner [<xref ref-type="bibr" rid="ref44">44</xref>] also noted that this opacity of the output is not aligned with the “medical ethos.” Weinert et al [<xref ref-type="bibr" rid="ref45">45</xref>] recommended that investing into explainable AI that produces a transparent and understandable AI could help address the issue of acceptability. Moorman [<xref ref-type="bibr" rid="ref38">38</xref>] reported that successful adoption was achieved by publishing evidence on the algorithm’s underpinnings and providing clinicians with details on how data elements interacted within the algorithm to produce the predictive output.</p>
        </sec>
        <sec>
          <title>Algorithm Bias, Equity, or Fairness</title>
          <p>A prominent theme that came through was around the prevention of algorithm bias to ensure equity and fairness and avoid concealed discrimination. Algorithmic bias has been defined by Panch et al [<xref ref-type="bibr" rid="ref46">46</xref>] as “the instances when the application of an algorithm compounds existing inequities in socioeconomic status, race, ethnic background, religion, gender, disability or sexual orientation to amplify them and adversely impact inequities in health systems.” Such biases have been visibly found in glomerular filtration rate and pulmonary function and have continued to persist despite efforts to address them. Inaccurate and underrepresentative training data sets for AI models can cause bias, misleading predictions, adverse events, and large-scale discrimination, causing barriers to adoption [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]. Baxter et al [<xref ref-type="bibr" rid="ref35">35</xref>] and Chua et al [<xref ref-type="bibr" rid="ref49">49</xref>] reported clinical stakeholders’ concerns around the relevance of the AI model’s output, especially because the algorithm did not consider social determinants of health to predict risk outcomes for readmissions. Similar concerns were raised by participants from other studies around the risk of algorithm bias as a challenge for adoption. Others expressed dissatisfaction that the AI algorithm may not be representative of the patient population among whom it is implemented or may have been trained with a biased training data set that has been retrofitted to produce certain results, therefore not providing a representative outcome of interest [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref51">51</xref>]. 
AI not accounting for patients’ health determinants was noted as a “grand challenge” [<xref ref-type="bibr" rid="ref19">19</xref>].</p>
          <p>Inadequate data from representative groups, algorithms designed to represent a majority, and missing variables that impact predictions are components that contribute to bias [<xref ref-type="bibr" rid="ref21">21</xref>]. This can be addressed by engaging clinicians in the design and development of the algorithm to ensure that appropriate measures are taken to address bias before the AI algorithm is deployed.</p>
        </sec>
        <sec>
          <title>Functionality (Accuracy and Usefulness)</title>
          <p>One of the major themes that came through was around the value, usefulness, and accuracy of an AI algorithm. Accuracy and quality of the AI algorithm’s output were primary reasons for adoption hesitancy in the context of the functionality of the AI. In some studies, patients reported the need to assess the usefulness of the AI before using it and had concerns around the quality and accuracy of the output, thereby questioning the value of AI as a whole [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref52">52</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]. Clinicians also reported concerns around the usefulness of the output based on the lack of accuracy and inactionable output contributing to a low likelihood of use. This also included dissatisfaction if recommendations were too similar, inappropriate, or not useful [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref56">56</xref>]. Ease of use; complex interfaces; and inconsistent performance, for example, due to false positives and negatives add burden to the workflow, creating more work for clinicians, thereby impeding adoption [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref57">57</xref>-<xref ref-type="bibr" rid="ref59">59</xref>]. Morrison [<xref ref-type="bibr" rid="ref36">36</xref>] identified a lack of an agreed standard and benchmark for accuracy (how accurate does an AI tool need to be before it is approved for clinical practice) as an impediment to implementation and, subsequently, adoption, as, if a standard existed, it could provide rationale for the accuracy. 
Temsah et al [<xref ref-type="bibr" rid="ref60">60</xref>] recommended the application of evidence-based oversight mechanisms to ensure accuracy and dependability. Finally, Choudhury [<xref ref-type="bibr" rid="ref61">61</xref>] noted that if an algorithm is not performing up to standards or adds more work to the clinicians, this can impact adoption, as clinicians perceive it as a high risk.</p>
          <p>Perceived benefit, perceived usefulness, usability, ease of use, usefulness, accuracy, and reliability of the output of the AI are key contributors to adoption [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref62">62</xref>-<xref ref-type="bibr" rid="ref67">67</xref>]. In particular, usability and acceptability should be assessed with the intended user in mind [<xref ref-type="bibr" rid="ref41">41</xref>]. Perceived benefit is especially important when it contributes to improved efficiency in clinical processes [<xref ref-type="bibr" rid="ref62">62</xref>].</p>
          <p>From a patient’s perspective, there is value if an AI technology can be used from home for minor consultations, such as skin cancer detection using an AI-enabled app [<xref ref-type="bibr" rid="ref42">42</xref>]. Ease of use of the technology and accessibility to information for minor health concerns [<xref ref-type="bibr" rid="ref39">39</xref>] are especially seen as valuable, as they negate the need for a visit to the physician; however, in the event that a visit is required based on the AI’s recommendation, then the integration of the technology with the health care system is considered beneficial [<xref ref-type="bibr" rid="ref42">42</xref>]. It is essential that as the AI system matures, it is designed such that it can “adopt and challenge contradictory rules and behaviours” [<xref ref-type="bibr" rid="ref43">43</xref>].</p>
        </sec>
        <sec>
          <title>Risk of Harm</title>
          <p>Patient safety concerns causing adverse effects were noted by Mlodzinski et al [<xref ref-type="bibr" rid="ref48">48</xref>] and Vijayakumar et al [<xref ref-type="bibr" rid="ref59">59</xref>]. The lack of accuracy of AI output was also considered to pose a potential risk of harm by both clinicians and patients, as, in some cases where the algorithm may output false negative results, it may provide an incorrect sense of reassurance and cause a delay in treatment. However, in cases where the algorithm is too sensitive, thereby providing false positive results, it may add work and costs to the treatment process [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref68">68</xref>]. The risk of harm can be lowered if AI algorithms are developed with the 5 rights (in the case of an automated decision system), similar to other clinical decision support tools [<xref ref-type="bibr" rid="ref40">40</xref>]. In addition, Sangers et al [<xref ref-type="bibr" rid="ref42">42</xref>] proposed that in some cases, AI applications should provide only risk indication instead of a diagnosis to reduce the risk of harm. Chen et al [<xref ref-type="bibr" rid="ref68">68</xref>] reported that policies and mechanisms to safeguard professionals could address challenges associated with a potential risk of harm due to a lack of output accuracy.</p>
        </sec>
        <sec>
          <title>Trust</title>
          <p>Lack of accuracy; doubts about unsafe results; privacy breaches; patients’ perceptions and acceptance of automation; and uncertainty about developers’ reliability, availability, usability, and perceived usefulness were found to be obstacles to gaining trust [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref70">70</xref>]. Clinicians expressed fear around having to reframe their professional identity and responsibilities [<xref ref-type="bibr" rid="ref57">57</xref>]. Fear around what AI really meant was noted as another barrier [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref42">42</xref>].</p>
          <p>Facilitators of trust included the endorsement of the technology by experts as well as academically backed clinicians, including regulating bodies such as the government; evidence of output accuracy based on the evaluation of AI; and positive opinions from trusted thought leaders in the respective clinical fields [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref60">60</xref>]. Another facilitator of trust is the engagement of patients in the development of AI. This could facilitate trust in the public and address concerns around trust in data sharing [<xref ref-type="bibr" rid="ref71">71</xref>]. Overall, trust was largely found to be associated with perceived usefulness; however, one study noted that “if peoples’ confidence and beliefs are improved, they will use the product despite its usefulness” [<xref ref-type="bibr" rid="ref52">52</xref>]. In addition, Fan et al [<xref ref-type="bibr" rid="ref72">72</xref>] reported that initial trust is a key indicator of the intention to use the AI application and noted that if the confidence to use the AI application is high based on performance expectancy, then this will increase trust in using the AI.</p>
        </sec>
        <sec>
          <title>Human-AI Teaming</title>
          <p>The lack of human intervention was found to be a barrier to adoption for both clinicians and patients. From a clinician standpoint, physicians expressed that they would be less likely to use an AI, given their familiarity with the patient’s condition and the value of intuition in clinical decision-making [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. Mlodzinski et al [<xref ref-type="bibr" rid="ref48">48</xref>] reported concerns about potential systemic bias present in the AI that could impact the patient-provider relationship. From a patient standpoint, the lack of human presence was seen as a limitation due to a lack of empathy and emotional connectivity with another human or simply not having the presence of a human physician to verbally communicate and discuss, such as when using an AI app or chatbot [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref64">64</xref>]. By contrast, the lack of human presence was, in some cases, seen as a benefit due to the anonymity in sharing intimate or uncomfortable health concerns [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. Hemphill et al [<xref ref-type="bibr" rid="ref73">73</xref>] reported increased confidence in patients when AI was combined with clinical interpretation.</p>
        </sec>
        <sec>
          <title>Aligning Strategic Components</title>
          <p>Several studies particularly highlighted the importance of strategic alignment with initiatives. Baxter et al [<xref ref-type="bibr" rid="ref35">35</xref>] reported on how a lack of alignment among different organizational initiatives led to varying outcomes and disjointed communication. Strohm et al [<xref ref-type="bibr" rid="ref57">57</xref>], on the other hand, talked about how strategic alignment could lead to better dispersing of funds across different departments.</p>
          <p>Sun and Medaglia [<xref ref-type="bibr" rid="ref50">50</xref>] pointed out the necessity of outlining a comprehensive “top-down strategy” that would include organizations’ goals and resource distribution for AI implementation. Weinert et al [<xref ref-type="bibr" rid="ref45">45</xref>] elaborated that to overcome the barrier to including AI initiatives in the organizational strategy, the German government introduced a new law that supported organizations with financial assistance to implement innovative digital technologies such as AI, as there was hesitancy among organizations to include expensive AI implementations as part of their strategy due to a lack of funds.</p>
        </sec>
        <sec>
          <title>Use Case–Driven or Problem-Driven AI</title>
          <p>Several studies noted that to start the journey of implementing AI, there is a need to identify a problem and not merely use data to come up with a solution. Therefore, use cases should be identified based on notable problems that can be addressed by AI solutions. One particular study mentioned how the lack of a use case affected the implementation of the AI model [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>].</p>
        </sec>
        <sec>
          <title>End-User Engagement or Co-Design</title>
          <p>A lack of sufficient buy-in from end users and a lack of endorsement from organizational leadership emerged as barriers, including not engaging stakeholders early in the process. It is critical to incorporate clinicians and other stakeholders, such as patients, in the development life cycle, especially the testing phase with the application of a user-centered design and testing approaches. This may be time intensive but proves to be an effective approach to enable adoption [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. As Ongena et al [<xref ref-type="bibr" rid="ref69">69</xref>] pointed out in their findings, patients should be involved when developing AI systems, specifically for diagnostic, treatment planning, and prognostic purposes. Pou-Prom et al [<xref ref-type="bibr" rid="ref74">74</xref>] reported the usefulness of engaging end users in designing, deploying, and refining the AI solution. Moorman [<xref ref-type="bibr" rid="ref38">38</xref>] recommended maximizing buy-in and engagement at all levels of stakeholders, from leadership to users, and especially engaging a clinician leader from the onset. Finally, Goldstein et al [<xref ref-type="bibr" rid="ref75">75</xref>] noted the inclusion of champions at the leadership and clinical levels to achieve successful implementation.</p>
        </sec>
        <sec>
          <title>Workflow Integration</title>
          <p>This review found that the lack of integration of the AI system into the workflow can be a barrier to successful implementation and adoption. For example, Baxter et al [<xref ref-type="bibr" rid="ref35">35</xref>] reported impacts on success due to variations in existing workflows for risk assessments and readmission scores across different areas. Similarly, Strohm et al [<xref ref-type="bibr" rid="ref57">57</xref>] mentioned how the lack of integration and standardization of workflows led to variations in workflows. For other types of AI solutions, such as apps, the data not being integrated into the health care system or workflow was seen as a barrier for patients to adopt the solution [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. Helenason et al [<xref ref-type="bibr" rid="ref58">58</xref>] and Schepart et al [<xref ref-type="bibr" rid="ref56">56</xref>] both noted the importance of conformity with the workflow when integrating AI tools into the environment where they would be used.</p>
          <p>Recommendations included the following: ensuring that AI applications easily integrate with existing IT systems, integrating data from patients’ use of AI-enabled apps into the health care system and workflow, and considering the integration of the AI into the clinical workflow but maintaining autonomy for clinicians to have the final say [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref75">75</xref>]. In situations where the AI system is deployed in different areas, using a common model may improve alignment in workflows [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Chen et al [<xref ref-type="bibr" rid="ref68">68</xref>] reported that clinicians saw AI integration into the workflow as a positive if the system was seen as potentially eliminating routine work, allowing them to focus on other tasks. Moorman [<xref ref-type="bibr" rid="ref38">38</xref>] reported that it was helpful to assess existing unit workflows, communications, escalation, and event management processes before implementation to address challenges brought up by clinicians concerned about added work.</p>
        </sec>
        <sec>
          <title>Awareness and Training</title>
          <p>Training refers to educating the users on various aspects of the technology, such as the outcomes of the AI model, its benefits, and how it supports the clinical workflow, and providing new skills such as technical and data science skills to staff, especially laggards and champions, to assist with the adoption [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref62">62</xref>]. Kelly et al [<xref ref-type="bibr" rid="ref21">21</xref>] particularly noted that to provide clinicians with clarity on how an algorithm could improve patient care, approaches such as using a decision curve analysis that would provide quantified benefits of using a model to inform actions that need to be taken would be helpful. Skepticism and a lack of understanding were seen as barriers to AI adoption [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. Chen et al [<xref ref-type="bibr" rid="ref68">68</xref>] found that AI adoption increased among radiologists who were more familiar with AI. Sun [<xref ref-type="bibr" rid="ref76">76</xref>] recommended that implementation teams should consider influencing clinicians by sharing AI knowledge via more informal communications, such as social media communication or in-person communication. Moorman [<xref ref-type="bibr" rid="ref38">38</xref>] found it helpful to develop educational material with input from clinicians to tailor it to the clinical role and hospital culture. 
Training and awareness should include building an understanding of the technology; providing clarity around language such as the definition of AI; education around data use in health care; and building awareness on the value of AI, including breaking down concepts that dispel fear [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref71">71</xref>]. Clinicians’ awareness and knowledge of AI before using it contributed to its successful acceptance [<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref73">73</xref>]. In addition, users feel that the technology is “unqualified” based on their perception of the premature nature of the technology [<xref ref-type="bibr" rid="ref39">39</xref>]. Misunderstanding of the capabilities of AI technologies in the general public and the health care sector is a challenge to adoption, with gaps in awareness around the value, advantages, and high expectations of AI [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Overall, Alsobhi et al [<xref ref-type="bibr" rid="ref70">70</xref>] emphasized the urgency of accelerating AI adoption through the dissemination of AI training.</p>
        </sec>
        <sec>
          <title>Resources and Infrastructure</title>
          <p>Shortages of personnel with the required skills were reported as barriers, along with the quality of IT infrastructure available for AI implementation [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Hickman et al [<xref ref-type="bibr" rid="ref71">71</xref>], in particular, noted the lack of technological (infrastructure) maturity to allow for the integration of AI. The presence of AI experts, perhaps a multidisciplinary team, particularly with clinical scientists, data science and subject matter experts with AI skills, an innovation manager, AI experts to provide training, and local champions within departments involved in the end-to-end process, was considered an important element for adoption [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref62">62</xref>,<xref ref-type="bibr" rid="ref71">71</xref>]. Goldstein et al [<xref ref-type="bibr" rid="ref75">75</xref>] noted that for scalability, where the AI application would be deployed in multiple sites, having designated resources from the onset of the project with clear roles and responsibilities was seen with success. Yang et al [<xref ref-type="bibr" rid="ref77">77</xref>] recommended cultivating talent with both high-level medical and technology knowledge and understanding how the 2 domains can be used to meet patients’ needs. In a different perspective on the shortage of resources, Chen et al [<xref ref-type="bibr" rid="ref68">68</xref>] noted that radiographers and radiologists held more positive attitudes toward the adoption of AI, as it would help address workforce shortages in the radiology field in the United Kingdom.</p>
        </sec>
        <sec>
          <title>Evaluation and Validation</title>
          <p>The need for evaluation on multiple fronts was noted by various studies. Studies indicated that the technical evaluation of AI is necessary as a first step to validation. Technical evaluation must be followed by clinical validation (based on established methods in clinical research) and economic validation. Wolff et al [<xref ref-type="bibr" rid="ref78">78</xref>] particularly noted the lack of clinical and economic measurements as a barrier to practical implementation. Evaluations should be tailored toward digital technologies to gather empirical evidence surrounding the value of AI’s use [<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref78">78</xref>]. However, the cost of conducting an empirical evaluation and a quantified clinical trial–type evaluation may be a deterrent to the pace of developing the technology. Therefore, this should be considered when selecting the type of evaluation to be conducted. A focus on assessing the effectiveness and accuracy was duly highlighted. Implementers should consider validating or testing the algorithm with synthetic data [<xref ref-type="bibr" rid="ref40">40</xref>-<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]. Establishing a standard methodology for the validation of AI algorithms and the overall evaluation of AI will be critical to gaining confidence from adopters [<xref ref-type="bibr" rid="ref50">50</xref>]. Hickman et al [<xref ref-type="bibr" rid="ref71">71</xref>] suggested that having structures in place for the continued monitoring of standards that impact AI (eg, regulatory standards) and ensuring that infrastructure is in place to evaluate and monitor algorithms continuously are necessary.</p>
        </sec>
        <sec>
          <title>Data Security, Ownership, Quality, and Availability</title>
          <p>Data quality, security, ownership, and storage were prominent themes in the reviewed studies. In terms of data quality and integrity, several issues were identified as barriers to developing good AI models that provide value to users. These were issues around variability, the nature of unstructured data, incompleteness of data, the data not representing the reality of clinical care, and the absence of data standards (specifically around how and what data are collected). Having metadata standards, terminologies, data quality metrics, and common data models were identified as facilitators in resolving some of these issues [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Fragmented access to data and limited sources, such as the availability of data only from EHRs or data silos, were also noted as barriers [<xref ref-type="bibr" rid="ref78">78</xref>].</p>
          <p>Data access, integrity, and provenance are key to the development of models. Institutions that were the most successful in implementing AI were thoughtful about how to guarantee data integrity [<xref ref-type="bibr" rid="ref40">40</xref>]. Wolff et al [<xref ref-type="bibr" rid="ref78">78</xref>] noted the challenges with data silos and fragmented access to medical data, including limitations on the availability of data only from the EHR, to enable the development of robust AI applications. In terms of data ownership, the dilemma around who owns the data, whether it would be the government, institution (eg, hospital), or patient, is a barrier to adoption, as it leaves questions around how data would be integrated or accessible for future AI advancement [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. While this may not present a direct adoption challenge, it does indirectly impact the availability of data required for development and to produce meaningful outputs, which is an impediment to adoption, as identified earlier.</p>
          <p>Data security was identified as a major contributor toward hesitancy, with concerns around cybersecurity relating to both training and testing data as well as fear of trackers and spyware obtaining unsolicited data [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref77">77</xref>]. Furthermore, the ability of deidentified data to be reidentified poses a major risk for the individuals and institutions providing their data. This further contributes to resistance to sharing the data that could help expand data sets for AI training. Several strategies for preventing security breaches have been proposed, and these could be helpful in securing data, especially health data that are accessible over the web [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref42">42</xref>].</p>
          <p>Concerns around data processing include the lack of understanding of how data are stored, processed, and accessed; the establishment of protocols; and compliance with existing privacy policies, such as the General Data Protection Regulation and the Health Insurance Portability and Accountability Act [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. A survey of patients conducted by Ongena et al [<xref ref-type="bibr" rid="ref69">69</xref>] indicated the need for patients to be informed about how and specifically which data are processed. One study explicitly highlighted the issue of data ownership. Concerns over data ownership were particularly evident when patients linked data ownership to trust in the technology [<xref ref-type="bibr" rid="ref47">47</xref>].</p>
        </sec>
        <sec>
          <title>Ethics and Privacy</title>
          <p>Concerns about privacy and ethics were focused on maintaining confidentiality, ensuring processes are in place to obtain consent, and having informed consent with clarity on how the data will be processed [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref56">56</xref>]. Having clear consent processes related to how data are generated through the use of AI and how these data flow as well as defining the meaning of consent and transparency on strategies to maintain privacy are seen as facilitators of adoption. This is especially applicable to “clinical and epidemiological use cases of ML in both decision support and automation categories, as data from patients or the public are essential to train algorithms in these areas” [<xref ref-type="bibr" rid="ref20">20</xref>]. In addition, Sun and Medaglia [<xref ref-type="bibr" rid="ref50">50</xref>] particularly pointed out the unethical use of data, such as data being used by commercial organizations. Ensuring transparency to end users, especially patients involved in the ethical and legal frameworks that guide the development of AI systems, could be helpful [<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref73">73</xref>]. Weinert et al [<xref ref-type="bibr" rid="ref45">45</xref>] particularly identified ethical issues, specifically as they relate to liability, as a barrier to AI adoption. Wolff et al [<xref ref-type="bibr" rid="ref78">78</xref>] noted that integrating “privacy-by-design” technologies into AI applications that incorporate advanced data protection features could mitigate such challenges.</p>
        </sec>
        <sec>
          <title>Governance</title>
          <p>Governance was primarily noted as a key facilitating factor, playing the role of enabling the full cycle of AI. It is critical to have a governance structure in place to oversee the development and rollout of AI from conception to implementation, with governance tools providing guidance on various stages of the process. Governance should include diverse professionals with clear articulation of accountability, including nuances in reactions to accountability [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref58">58</xref>,<xref ref-type="bibr" rid="ref64">64</xref>]. Isbanner et al [<xref ref-type="bibr" rid="ref64">64</xref>] noted the importance of articulating accountability. This is especially important in health care because “ethical and governance challenges matter to the public.” Wolff et al [<xref ref-type="bibr" rid="ref78">78</xref>] recommended outlining specific responsibilities for different stakeholders to delineate accountability-based steps in the process, for example, identifying who would review an x-ray image analysis and identifying liability and culpability (eg, obligatory human check of a decision obtained by an AI application). According to Sunarti et al [<xref ref-type="bibr" rid="ref47">47</xref>], the governance body should include “developers of software, government officials, health care, medical practitioners and advocacy for patients groups.” The lack of accountability in the decision-making process is a challenge; therefore, framing this in the governance model could be a way to address adoption issues related to accountability [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref73">73</xref>]. 
Other functions of the governance body would be to ensure funding and connectivity to the wider data science community, ensure alignment with strategic initiatives in the organization, and act as a long-term centralized knowledge repository for performance oversight. A governance model should have mechanisms and systems in place to facilitate changes impacting AI technologies in development or use based on cyclical changes in the technology or changes in the external landscape, such as the ones initiated by regulatory bodies [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref79">79</xref>]. Formalized analysis of ethical considerations in the development and use of AI should be a key component of governance. Governance should also be linked to the data governance committees for various data processing, quality, and integrity oversights [<xref ref-type="bibr" rid="ref43">43</xref>]. One particular solution proposed by Morrison [<xref ref-type="bibr" rid="ref36">36</xref>] was to have national-level governance templates that would facilitate national data protection via the implementation of impact assessments. In contrast, governance can hinder data sharing. Therefore, governance bodies should maintain a rigorous process without becoming a constraint [<xref ref-type="bibr" rid="ref36">36</xref>].</p>
        </sec>
        <sec>
          <title>Regulatory and Legal Frameworks</title>
          <p>A lack of regulation and policies from the government, including uncertainty around legal direction or law, was presented as a barrier to the application of AI technologies [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref60">60</xref>,<xref ref-type="bibr" rid="ref77">77</xref>,<xref ref-type="bibr" rid="ref78">78</xref>]. Other researchers noted that there was no clarity in the area of regulatory structures with regard to which regulatory body should be consulted for AI developments and deployments [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. Therefore, it would be essential for governments to establish regulatory bodies and legal frameworks to provide guidance on various aspects of AI development and application [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. In addition, ambiguity surrounding malpractice liability policy as it relates to physicians’ legal responsibilities, for example, in case of diagnostic errors, remains a barrier to AI adoption [<xref ref-type="bibr" rid="ref49">49</xref>].</p>
        </sec>
        <sec>
          <title>Funding and Cost</title>
          <p>The lack of and uncertainty surrounding funding are presented as barriers to implementation [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref56">56</xref>]. Researchers have suggested that there is a need to have costs identified from the start-up stage all the way to scalability. Funding can especially be a barrier if an AI technology is lacking in evidence, with little to show for the value it provides. Sun and Medaglia [<xref ref-type="bibr" rid="ref50">50</xref>] and Xing et al [<xref ref-type="bibr" rid="ref53">53</xref>] reported that financial barriers in the context of cost and benefits, the lack of a sustainable business model, and insufficient funding to meet public demands should also be considered. Sun and Medaglia [<xref ref-type="bibr" rid="ref50">50</xref>] additionally noted challenges associated with the adoption of IBM Watson in China due to patients having to pay high fees for the service. Finally, funding should be cohesively considered not only for the development of the technology but also for resources required to implement the technology, such as technical subject matter experts, project managers, and champions [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref57">57</xref>]. Weinert et al [<xref ref-type="bibr" rid="ref45">45</xref>] noted that to overcome the barrier of lack of resources and meet the financial investment demands of AI implementations, the German government introduced a new law that could help organizations bridge funding gaps; however, they could not conclude whether this would facilitate any progress, as the announcement was just made.</p>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>The principal findings of this study imply several factors impacting the adoption of AI systems, and for each barrier identified, there are corresponding facilitators. Ethics, bias, and transparency or explainability are core considerations in developing trustworthy and adoption-centric AI systems. Furthermore, the barriers identified should be holistically synergized within a governance framework, one that ideally oversees the entire end-to-end process, from ideation to the implementation and sustainability of AI systems.</p>
        <p>Trust emerged as one of the most critical elements of AI adoption. This review revealed that trust can either be facilitated or impacted by almost all the themes identified in this scoping review. More specifically, fairness, explainability, and ethics seem to be the centerfold of barriers to AI adoption. Therefore, our discussions have focused on these 3 domains with recommendations on how organizations can address these domains to facilitate adoption.</p>
      </sec>
      <sec>
        <title>Transparency and Explainability</title>
        <p>Our findings revealed that explainability in the context of algorithmic transparency is a significant barrier to adoption. Various studies noted that limitations due to the opacity of an algorithm may inhibit clinicians from relying on ML outputs in clinical settings. This leads to ambiguity on whether the ML output can be trusted enough for the clinician to move forward with the clinical decision-making or should be overridden due to a lack of certitude or misalignment with traditional clinical judgment [<xref ref-type="bibr" rid="ref80">80</xref>]. AI explainability (XAI) is an entire field dedicated to ensuring trustworthy and explainable AI. There are numerous publications from this field. For example, Markus et al [<xref ref-type="bibr" rid="ref81">81</xref>] noted that explanations are crucial to involving a human in the process of verifying the decision of the algorithm, for example, by revealing what features were used in training the AI algorithm. Adadi and Berrada [<xref ref-type="bibr" rid="ref82">82</xref>] have conducted a comprehensive review of existing evidence on explainability approaches and organized them from different perspectives. They specifically outline 4 guidelines for the need for explainable AI. Explain to justify: the decisions made by using an underlying model should be explained to increase their justifiability. Explain to control: explanations should enhance the transparency of a model and its functioning, allowing its debugging and the identification of potential flaws. Explain to improve: explanations should help improve the accuracy and efficiency of their models. Explain to discover: explanations should support the extraction of novel knowledge and the learning of relationships and patterns to manage social interaction and create a shared meaning of the decision-making process.</p>
        <p>There are several techniques that organizations can adopt when aiming to achieve explainable AI. These include explainable modeling, evaluating for explainability, or following an explainability framework, as proposed by Markus et al [<xref ref-type="bibr" rid="ref81">81</xref>]. Preece [<xref ref-type="bibr" rid="ref83">83</xref>] and Vilone and Longo [<xref ref-type="bibr" rid="ref31">31</xref>] have done a thorough analysis of evaluation approaches for explainable AI. The inclusion of the combination of these techniques from XAI could be useful for organizations to address adoption barriers associated with explainability. It is prudent that organizations developing and implementing AI incorporate various explainable modeling approaches, include explainability frameworks, and consider explainability evaluation in their AI life cycle. In addition, part of this process should include equipping clinicians with knowledge about what the AI takes as input, how the input is processed, and what the AI produces as output, along with the training process used. In this way, clinician engagement is essential to the process of developing and validating AI algorithms and outputs. This approach will also empower clinicians to discuss these transparencies with patients, thereby contributing toward building trust on all fronts.</p>
      </sec>
      <sec>
        <title>Bias</title>
        <p>In terms of equity and fairness, our findings have demonstrated that algorithm bias is a critical factor in not only gaining trust but also having meaningful outputs that can be applied to diverse patients. Specific concerns related to adoption include models being trained on data not representative of the patient population or not containing diverse data as related to social determinants of health [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Bias in AI systems can be introduced due to biased data, algorithms being trained on the biased data, limitations in the model itself, small training size, lack of user participation, and other unseen factors [<xref ref-type="bibr" rid="ref84">84</xref>]. There are a number of examples of specific issues related to bias in AI systems. Buolamwini and Gebru [<xref ref-type="bibr" rid="ref85">85</xref>] reported that an artificial vision algorithm was unable to identify dark-skinned individuals, as &#62;80% of the individuals in a reference data set were light-skinned individuals. Another failed case is found in the field of anesthesiology, where data from 40 institutions revealed that Black patients received inferior care (with respect to postoperative nausea and vomiting prophylaxis) at nearly every single center [<xref ref-type="bibr" rid="ref86">86</xref>]. Seyyed-Kalantari et al [<xref ref-type="bibr" rid="ref87">87</xref>] noted that convolutional neural networks will frequently underdiagnose Hispanic patients at a disproportionate rate due to the potential lack of access to health care and insurance type. In the field of mental health, specifically concerning schizophrenia, a meta-analysis found that risk-flagging models trained on European populations have reduced performance in East Asian populations [<xref ref-type="bibr" rid="ref88">88</xref>].</p>
          <p>According to Panch et al [<xref ref-type="bibr" rid="ref46">46</xref>], several challenges need to be overcome when addressing algorithmic bias. They include a lack of clear definitions and standards of “fairness,” insufficient contextual specificity, and the “black-box” nature of algorithms. These can be addressed by developing algorithms based on where they will be deployed by first establishing and identifying these contexts and ensuring processes are in place to address these challenges. There are numerous solutions to address bias that emphasize the risk of bias mitigation techniques to be applied at each stage of model development. For example, Chen et al [<xref ref-type="bibr" rid="ref89">89</xref>] and O’Reilly-Shah [<xref ref-type="bibr" rid="ref90">90</xref>] recommend that, at the preprocessing stage, where the data may have internalized biases, implementers apply techniques such as reweighing data samples of marginalized groups or resampling based on the population to which the algorithm output would be applied. These techniques could help address the adoption barriers identified in this review, particularly those around underrepresentative and inaccurate training data sets. Similarly, at the postprocessing stage, similar thresholds for different representative groups could be set for the model to be monitored, adjusted, and trained. The design of the algorithm should consider equity via training data sets that are either diverse or focused to fit the localized population. Another major concern is data set drift, which means that the data set the model was trained with is different from the test data set. There are various techniques to mitigate data set shift, and these techniques should be considered in model development. Another mitigation technique, as recommended by Chen et al [<xref ref-type="bibr" rid="ref89">89</xref>], is federated learning, where a model is trained on a global server. 
This technique allows for models to be trained on large data sets without sharing sensitive information. Aside from more quantitative techniques to address the risk of bias, there are assessments available that can be used as a checklist during each stage of AI development. While these tools can address statistical bias, it is much more challenging to identify social bias that can intrude into the data. Frameworks such as the one developed by Landers and Behrend [<xref ref-type="bibr" rid="ref91">91</xref>] comprehensively outline questions that would be asked at each stage of AI development. These questions focus on information, perceptions, and other social and cultural components. Such tools, when integrated into the AI development process, would help gather evidence that could be shared with clinicians and patients on how bias mitigation has been considered in the end-to-end development process, thereby addressing adoption concerns around bias identified in this review.</p>
      </sec>
      <sec>
        <title>Ethics</title>
        <p>Gerke et al [<xref ref-type="bibr" rid="ref92">92</xref>] have discussed four primary ethical challenges that need to be addressed to realize the full potential of AI in health care: (1) informed consent to use, (2) safety and transparency, (3) algorithmic fairness and biases, and (4) data privacy. These challenges resonate with our findings around barriers to adoption and, interestingly, tie in these elements of barriers to adoption under the ethics domain.</p>
        <p>There are several cases of ethical concerns that highlight the need for ethics. In the context of ethical concerns around informed consent and data privacy, in 2017, the personal data of approximately 1.6 million patients were provided to Google DeepMind by Royal Free National Health Service Foundation Trust without the patients’ consent. The data were to be used to test a new way of detecting kidney injuries [<xref ref-type="bibr" rid="ref93">93</xref>]. From a clinician’s perspective, there are concerns around what the clinician’s responsibility is in informing patients about the use of AI in their care [<xref ref-type="bibr" rid="ref92">92</xref>]. In the context of algorithmic safety and bias, Buolamwini and Gebru [<xref ref-type="bibr" rid="ref85">85</xref>] and Liao [<xref ref-type="bibr" rid="ref94">94</xref>] note ethical concerns around algorithms not detecting dark-skinned individuals for skin cancer detection due to the fact that the algorithm was trained on light-skinned individuals. Similarly, an algorithm that is widely used in US hospitals to identify patients who need additional care was found to use the cost expenditure by patients as a means to identify those who need extra care. This was discriminatory toward Black patients, as they generally spend less than White patients on health care, resulting in false conclusions [<xref ref-type="bibr" rid="ref95">95</xref>].</p>
        <p>In terms of safety and transparency, Liao [<xref ref-type="bibr" rid="ref94">94</xref>] provides a good explanation as to why the lack of safety and transparency is an issue. They provide an example of a prediction of a 70% chance for a supposed patient’s tumor to become malignant in 5 years; however, the algorithm does not necessarily provide detailed reasons as to how it arrived at the conclusion. From an ethical standpoint, this is an issue because humans need to know how a decision is reached; specifically, in health care, not being able to understand and trust a decision is problematic.</p>
          <p>Which ethical concerns should organizations address and, more importantly, how can they address them? According to Liao [<xref ref-type="bibr" rid="ref94">94</xref>], there are &#62;80 ethical frameworks that have been proposed for AI. Many of these draw on the 4 principles of biomedical ethics, namely, autonomy, beneficence, nonmaleficence, and justice. Some of these frameworks provide practical checklists that organizations can use to conduct an ethics deliberation. For example, Solanki et al [<xref ref-type="bibr" rid="ref96">96</xref>] developed a comprehensive framework for AI developers that includes ethics oversight during different phases of the AI development life cycle. Similarly, Rogers et al [<xref ref-type="bibr" rid="ref97">97</xref>] shared a very practical approach to how they evaluated an AI model for ethics. Such tools are practical methods of assessing AI algorithms for ethical principles. Despite these practical approaches, Goirand et al [<xref ref-type="bibr" rid="ref98">98</xref>] note that operationalizing ethical frameworks for AI is challenging, as there is a need for contextualization due to different ethical issues present in different environments. Therefore, organizations have to consider these nuances and determine an ethics approach that would work best for their organization when evaluating each AI model.</p>
        <p>These frameworks and tools to develop trustworthy AI by addressing various barriers to adoption are also just beginning to emerge and be applied in real-life cases; however, they are a good start to the implementation journey of AI, especially those applied in clinical settings. Overall, our findings demonstrate that the adoption of an AI system has to be considered from its onset, when the system is being conceptualized, to when it is implemented and sustained. The existing technology implementation and acceptance models may not be all encompassing of adoption factors; therefore, adding additional frameworks around trust, bias, explainability, and ethics will be necessary to foresee the success of an AI innovation. A governance model may address concerns around risk, safety, and adoption barriers identified in this paper by facilitating the overall development process of AI and ensuring various checks and balances are in place. <xref rid="figure3" ref-type="fig">Figure 3</xref> is a visual depiction of the core elements that were found to impact trust, as discussed in this section.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Adoption barriers, as related to trust.</p>
          </caption>
          <graphic xlink:href="humanfactors_v11i1e48633_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>Given the limited application of AI in health care at the time of this research, only a small number of papers that reported on implementation barriers and facilitators were reviewed to identify AI adoption barriers and facilitators. As the application of AI and types of AI systems in health care grow, a follow-up on adoption barriers and facilitators to assess for additional barriers and facilitators suitable to future environments may be necessary.</p>
      </sec>
      <sec>
        <title>Comparison With Prior Work</title>
        <p>At the time this search was conducted, a few literature reviews on the determinants of and barriers to AI adoption were conducted, such as the review by Radhakrishnan and Chattopadhyay [<xref ref-type="bibr" rid="ref26">26</xref>]. However, these studies span across multiple industries. For health care, one systematic review on the <italic>barriers</italic> to AI adoption in health care was conducted by Assadullah [<xref ref-type="bibr" rid="ref27">27</xref>]. However, there is less work that considers both the <italic>barriers to and facilitators</italic> of AI adoption in health care at large. Therefore, this review has attempted to explore the latter to provide considerations for health care organizations looking to successfully implement AI technologies via increased adoption. Our findings are validated due to the replication of several themes identified in similar, previous research by Assadullah [<xref ref-type="bibr" rid="ref27">27</xref>]. Common themes identified around barriers to adoption included explainability, trust issues centered on privacy, challenges around data ownership, lack of regulatory standards, issue of bias, and lack of accountability. Overall, the issue of trust was found to be centered on bias, ethics, and explainability, which led to a lack of accountability and an inability to evaluate. Other issues impeding trust included impacts on model performance leading to inaccurate results. These findings around trust resonate with results from this research, reinforcing the barriers to adoption identified in both studies.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>This literature review revealed that trust is impacted by a number of elements identified as barriers and that trust is a significant catalyst of adoption. A governance structure can be a key facilitator in ensuring all the elements identified as barriers are addressed appropriately. The findings demonstrate that the implementation of AI in health care is still in many ways dependent on the establishment of regulatory and legal frameworks. Further research around the combination of governance and implementation frameworks, models, or theories to enhance trust that would specifically enable adoption is needed to provide the necessary guidance to those translating AI research into practice. Future research could also be expanded to include attempts at understanding patients’ perspectives on complex, high-risk AI use cases and how the use of AI applications affects clinical practice and patient care, including sociotechnical considerations, as more algorithms are implemented in actual clinical environments.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>PRISMA checklist.</p>
        <media xlink:href="humanfactors_v11i1e48633_app1.pdf" xlink:title="PDF File  (Adobe PDF File), 166 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">DL</term>
          <def>
            <p>deep learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">EHR</term>
          <def>
            <p>electronic health record</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">EMR</term>
          <def>
            <p>electronic medical record</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">ML</term>
          <def>
            <p>machine learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">NLP</term>
          <def>
            <p>natural language processing</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">PRISMA</term>
          <def>
            <p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>EB received a Michael Smith Health Research BC Health Professional Investigator Award (grant HPI-2018-2045). This project received no specific grant from any funding agency in the public, commercial, or not-for-profit sector.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>AK is editor in chief of <italic>JMIR Human Factors</italic>, but was not involved in the processing of this article. EB is the editor in chief of <italic>JMIR Nursing</italic>, but was not involved in the processing of this article.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>2022 global health care outlook</article-title>
          <source>Deloitte</source>
          <year>2024</year>
          <month>5</month>
          <day>27</day>
          <access-date>2024-05-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.deloitte.com/global/en/Industries/life-sciences-health-care/perspectives/global-health-care-sector-outlook.html">https://www.deloitte.com/global/en/Industries/life-sciences-health-care/perspectives/global-health-care-sector-outlook.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <article-title>Key strategic priorities</article-title>
          <source>CIFAR</source>
          <access-date>2022-02-15</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cifar.ca/ai/key-strategic-priorities/#topskipToContent">https://cifar.ca/ai/key-strategic-priorities/#topskipToContent</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hamet</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tremblay</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in medicine</article-title>
          <source>Metabolism</source>
          <year>2017</year>
          <month>04</month>
          <volume>69S</volume>
          <fpage>S36</fpage>
          <lpage>40</lpage>
          <pub-id pub-id-type="doi">10.1016/j.metabol.2017.01.011</pub-id>
          <pub-id pub-id-type="medline">28126242</pub-id>
          <pub-id pub-id-type="pii">S0026-0495(17)30015-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="web">
          <article-title>The age of AI: what exactly is AI?</article-title>
          <source>Deloitte Malta</source>
          <access-date>2024-04-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www2.deloitte.com/mt/en/pages/rpa-and-ai/articles/mt-age-of-ai-2-what-is-it.html">https://www2.deloitte.com/mt/en/pages/rpa-and-ai/articles/mt-age-of-ai-2-what-is-it.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tekkeşin</surname>
              <given-names>Aİ</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in healthcare: past, present and future</article-title>
          <source>Anatol J Cardiol</source>
          <year>2019</year>
          <month>10</month>
          <volume>22</volume>
          <issue>Suppl 2</issue>
          <fpage>8</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.14744/AnatolJCardiol.2019.28661"/>
          </comment>
          <pub-id pub-id-type="doi">10.14744/AnatolJCardiol.2019.28661</pub-id>
          <pub-id pub-id-type="medline">31670713</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>PourNejatian</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Shin</surname>
              <given-names>HC</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>KE</given-names>
            </name>
            <name name-style="western">
              <surname>Parisien</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Compas</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Costa</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Flores</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Magoc</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Harle</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Lipori</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Mitchell</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Hogan</surname>
              <given-names>WR</given-names>
            </name>
            <name name-style="western">
              <surname>Shenkman</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Bian</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>A large language model for electronic health records</article-title>
          <source>NPJ Digit Med</source>
          <year>2022</year>
          <month>12</month>
          <day>26</day>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>194</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-022-00742-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-022-00742-2</pub-id>
          <pub-id pub-id-type="medline">36572766</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-022-00742-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC9792464</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Secinaro</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Calandra</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Secinaro</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Muthurangu</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Biancone</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>The role of artificial intelligence in healthcare: a structured literature review</article-title>
          <source>BMC Med Inform Decis Mak</source>
          <year>2021</year>
          <month>04</month>
          <day>10</day>
          <volume>21</volume>
          <issue>1</issue>
          <fpage>125</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-021-01488-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12911-021-01488-9</pub-id>
          <pub-id pub-id-type="medline">33836752</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12911-021-01488-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC8035061</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tran</surname>
              <given-names>BX</given-names>
            </name>
            <name name-style="western">
              <surname>Vu</surname>
              <given-names>GT</given-names>
            </name>
            <name name-style="western">
              <surname>Ha</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Vuong</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vuong</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>La</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Nghiem</surname>
              <given-names>KP</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>Latkin</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Tam</surname>
              <given-names>WW</given-names>
            </name>
            <name name-style="western">
              <surname>Cheung</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>HT</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>CS</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>RC</given-names>
            </name>
          </person-group>
          <article-title>Global evolution of research in artificial intelligence in health and medicine: a bibliometric study</article-title>
          <source>J Clin Med</source>
          <year>2019</year>
          <month>03</month>
          <day>14</day>
          <volume>8</volume>
          <issue>3</issue>
          <fpage>360</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=jcm8030360"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/jcm8030360</pub-id>
          <pub-id pub-id-type="medline">30875745</pub-id>
          <pub-id pub-id-type="pii">jcm8030360</pub-id>
          <pub-id pub-id-type="pmcid">PMC6463262</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davenport</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kalakota</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>The potential for artificial intelligence in healthcare</article-title>
          <source>Future Healthc J</source>
          <year>2019</year>
          <month>06</month>
          <day>13</day>
          <volume>6</volume>
          <issue>2</issue>
          <fpage>94</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2514-6645(24)01059-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.7861/futurehosp.6-2-94</pub-id>
          <pub-id pub-id-type="medline">31363513</pub-id>
          <pub-id pub-id-type="pii">S2514-6645(24)01059-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC6616181</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wah Ho</surname>
              <given-names>AF</given-names>
            </name>
            <name name-style="western">
              <surname>Ong</surname>
              <given-names>ME</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in emergency medicine</article-title>
          <source>J Emerg Crit Care Med</source>
          <year>2018</year>
          <month>10</month>
          <volume>2</volume>
          <fpage>82</fpage>
          <pub-id pub-id-type="doi">10.21037/jeccm.2018.10.08</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kirubarajan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Taher</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Masood</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in emergency medicine: a scoping review</article-title>
          <source>J Am Coll Emerg Physicians Open</source>
          <year>2020</year>
          <month>12</month>
          <day>07</day>
          <volume>1</volume>
          <issue>6</issue>
          <fpage>1691</fpage>
          <lpage>702</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33392578"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/emp2.12277</pub-id>
          <pub-id pub-id-type="medline">33392578</pub-id>
          <pub-id pub-id-type="pii">EMP212277</pub-id>
          <pub-id pub-id-type="pmcid">PMC7771825</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Varner</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Can artificial intelligence predict emergency department crowding?</article-title>
          <source>Healthy Debate</source>
          <access-date>2024-04-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://healthydebate.ca/2020/03/topic/predicting-emergency-department-crowding-artificial-intelligence-mar2020/">https://healthydebate.ca/2020/03/topic/predicting-emergency-department-crowding-artificial-intelligence-mar2020/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cuttler</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Transforming health care: how artificial intelligence is reshaping the medical landscape</article-title>
          <source>CBC News</source>
          <year>2019</year>
          <access-date>2024-04-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.cbc.ca/news/health/artificial-intelligence-health-care-1.5110892">https://www.cbc.ca/news/health/artificial-intelligence-health-care-1.5110892</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Briganti</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Le Moine</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in medicine: today and tomorrow</article-title>
          <source>Front Med (Lausanne)</source>
          <year>2020</year>
          <month>2</month>
          <day>5</day>
          <volume>7</volume>
          <fpage>27</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32118012"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fmed.2020.00027</pub-id>
          <pub-id pub-id-type="medline">32118012</pub-id>
          <pub-id pub-id-type="pmcid">PMC7012990</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Beam</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>Kohane</surname>
              <given-names>IS</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in healthcare</article-title>
          <source>Nat Biomed Eng</source>
          <year>2018</year>
          <month>10</month>
          <day>10</day>
          <volume>2</volume>
          <issue>10</issue>
          <fpage>719</fpage>
          <lpage>31</lpage>
          <pub-id pub-id-type="doi">10.1038/s41551-018-0305-z</pub-id>
          <pub-id pub-id-type="medline">31015651</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41551-018-0305-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>AC</given-names>
            </name>
          </person-group>
          <article-title>Big data in medicine: the upcoming artificial intelligence</article-title>
          <source>Prog Pediatr Cardiol</source>
          <year>2016</year>
          <month>12</month>
          <volume>43</volume>
          <fpage>91</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ppedcard.2016.08.021</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="web">
          <article-title>The healthcare data explosion</article-title>
          <source>RBC Capital Markets</source>
          <access-date>2024-04-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.rbccm.com/en/gib/healthcare/episode/the_healthcare_data_explosion">https://www.rbccm.com/en/gib/healthcare/episode/the_healthcare_data_explosion</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Reinsel</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Gantz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rydning</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>The digitization of the world: from edge to core</article-title>
          <source>Seagate</source>
          <year>2018</year>
          <access-date>2024-04-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.seagate.com/files/www-content/our-story/trends/files/idc-seagate-dataage-whitepaper.pdf">https://www.seagate.com/files/www-content/our-story/trends/files/idc-seagate-dataage-whitepaper.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Romero-Brufau</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wyatt</surname>
              <given-names>KD</given-names>
            </name>
            <name name-style="western">
              <surname>Boyum</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Mickelson</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cognetta-Rieke</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>A lesson in implementation: a pre-post study of providers' experience with artificial intelligence-based clinical decision support</article-title>
          <source>Int J Med Inform</source>
          <year>2020</year>
          <month>05</month>
          <volume>137</volume>
          <fpage>104072</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2019.104072</pub-id>
          <pub-id pub-id-type="medline">32200295</pub-id>
          <pub-id pub-id-type="pii">S1386-5056(19)31012-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shaw</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rudzicz</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Jamieson</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Goldfarb</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and the implementation challenge</article-title>
          <source>J Med Internet Res</source>
          <year>2019</year>
          <month>07</month>
          <day>10</day>
          <volume>21</volume>
          <issue>7</issue>
          <fpage>e13659</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2019/7/e13659/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/13659</pub-id>
          <pub-id pub-id-type="medline">31293245</pub-id>
          <pub-id pub-id-type="pii">v21i7e13659</pub-id>
          <pub-id pub-id-type="pmcid">PMC6652121</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kelly</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Karthikesalingam</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Suleyman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Key challenges for delivering clinical impact with artificial intelligence</article-title>
          <source>BMC Med</source>
          <year>2019</year>
          <month>10</month>
          <day>29</day>
          <volume>17</volume>
          <issue>1</issue>
          <fpage>195</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedicine.biomedcentral.com/articles/10.1186/s12916-019-1426-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12916-019-1426-2</pub-id>
          <pub-id pub-id-type="medline">31665002</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12916-019-1426-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC6821018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Leviss</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>HIT or Miss – studying failures to enable success</article-title>
          <source>Appl Clin Inform</source>
          <year>2017</year>
          <month>12</month>
          <day>16</day>
          <volume>02</volume>
          <issue>03</issue>
          <fpage>345</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.4338/aci-2011-03-ie-0020</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Venkatesh</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Morris</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>GB</given-names>
            </name>
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>FD</given-names>
            </name>
          </person-group>
          <article-title>User acceptance of information technology: toward a unified view</article-title>
          <source>MIS Q</source>
          <year>2003</year>
          <volume>27</volume>
          <issue>3</issue>
          <fpage>425</fpage>
          <lpage>78</lpage>
          <pub-id pub-id-type="doi">10.2307/30036540</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Greenhalgh</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Abimbola</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The NASSS framework - a synthesis of multiple theories of technology implementation</article-title>
          <source>Stud Health Technol Inform</source>
          <year>2019</year>
          <month>07</month>
          <day>30</day>
          <volume>263</volume>
          <fpage>193</fpage>
          <lpage>204</lpage>
          <pub-id pub-id-type="doi">10.3233/SHTI190123</pub-id>
          <pub-id pub-id-type="medline">31411163</pub-id>
          <pub-id pub-id-type="pii">SHTI190123</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cresswell</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sheikh</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Organizational issues in the implementation and adoption of health information technology innovations: an interpretative review</article-title>
          <source>Int J Med Inform</source>
          <year>2013</year>
          <month>05</month>
          <volume>82</volume>
          <issue>5</issue>
          <fpage>e73</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2012.10.007</pub-id>
          <pub-id pub-id-type="medline">23146626</pub-id>
          <pub-id pub-id-type="pii">S1386-5056(12)00199-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Radhakrishnan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chattopadhyay</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Determinants and barriers of artificial intelligence adoption – a literature review</article-title>
          <source>Proceedings of the 2020 International Conference on Transfer and Diffusion of IT, TDIT</source>
          <year>2020</year>
          <conf-name>TDIT '20</conf-name>
          <conf-date>December 18-19, 2020</conf-date>
          <conf-loc>Tiruchirappalli, India</conf-loc>
          <fpage>89</fpage>
          <lpage>99</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://link.springer.com/chapter/10.1007/978-3-030-64849-7_9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/978-3-030-64849-7_9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Assadullah</surname>
              <given-names>MM</given-names>
            </name>
          </person-group>
          <article-title>Barriers to artificial intelligence adoption in healthcare management: a systematic review</article-title>
          <source>SSRN Journal. Preprint posted online March 4, 2020</source>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ssrn.com/abstract=3530598"/>
          </comment>
          <pub-id pub-id-type="doi">10.2139/ssrn.3530598</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arksey</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>O'Malley</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Scoping studies: towards a methodological framework</article-title>
          <source>Int J Soc Res Methodol</source>
          <year>2005</year>
          <month>02</month>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>19</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1080/1364557032000119616</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hosseini</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Jahanshahlou</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Akbarzadeh</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Zarei</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vaez-Gharamaleki</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Formulating research questions for evidence-based studies</article-title>
          <source>J Med Surg Public Health</source>
          <year>2024</year>
          <month>04</month>
          <volume>2</volume>
          <fpage>100046</fpage>
          <pub-id pub-id-type="doi">10.1016/j.glmedi.2023.100046</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hsieh</surname>
              <given-names>HF</given-names>
            </name>
            <name name-style="western">
              <surname>Shannon</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>Three approaches to qualitative content analysis</article-title>
          <source>Qual Health Res</source>
          <year>2005</year>
          <month>11</month>
          <volume>15</volume>
          <issue>9</issue>
          <fpage>1277</fpage>
          <lpage>88</lpage>
          <pub-id pub-id-type="doi">10.1177/1049732305276687</pub-id>
          <pub-id pub-id-type="medline">16204405</pub-id>
          <pub-id pub-id-type="pii">15/9/1277</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vilone</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Longo</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Notions of explainability and evaluation approaches for explainable artificial intelligence</article-title>
          <source>Inf Fusion</source>
          <year>2021</year>
          <month>12</month>
          <volume>76</volume>
          <fpage>89</fpage>
          <lpage>106</lpage>
          <pub-id pub-id-type="doi">10.1016/j.inffus.2021.05.009</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brennan</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>Kirby</surname>
              <given-names>SD</given-names>
            </name>
          </person-group>
          <article-title>Barriers of artificial intelligence implementation in the diagnosis of obstructive sleep apnea</article-title>
          <source>J Otolaryngol Head Neck Surg</source>
          <year>2022</year>
          <month>04</month>
          <day>25</day>
          <volume>51</volume>
          <issue>1</issue>
          <fpage>16</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1186/s40463-022-00566-w?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub  0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s40463-022-00566-w</pub-id>
          <pub-id pub-id-type="medline">35468865</pub-id>
          <pub-id pub-id-type="pii">10.1186/s40463-022-00566-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC9036782</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Holzinger</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Langs</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Denk</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zatloukal</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Causability and explainability of artificial intelligence in medicine</article-title>
          <source>Wiley Interdiscip Rev Data Min Knowl Discov</source>
          <year>2019</year>
          <month>04</month>
          <day>02</day>
          <volume>9</volume>
          <issue>4</issue>
          <fpage>e1312</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32089788"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/widm.1312</pub-id>
          <pub-id pub-id-type="medline">32089788</pub-id>
          <pub-id pub-id-type="pii">WIDM1312</pub-id>
          <pub-id pub-id-type="pmcid">PMC7017860</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Larsson</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Heintz</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Transparency in artificial intelligence</article-title>
          <source>Internet Policy Rev</source>
          <year>2020</year>
          <volume>9</volume>
          <issue>2</issue>
          <pub-id pub-id-type="doi">10.14763/2020.2.1469</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baxter</surname>
              <given-names>SL</given-names>
            </name>
            <name name-style="western">
              <surname>Bass</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Sitapati</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Barriers to implementing an artificial intelligence model for unplanned readmissions</article-title>
          <source>ACI open</source>
          <year>2020</year>
          <month>07</month>
          <day>19</day>
          <volume>4</volume>
          <issue>2</issue>
          <fpage>e108</fpage>
          <lpage>13</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33274314"/>
          </comment>
          <pub-id pub-id-type="doi">10.1055/s-0040-1716748</pub-id>
          <pub-id pub-id-type="medline">33274314</pub-id>
          <pub-id pub-id-type="pmcid">PMC7710326</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Morrison</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and the NHS: a qualitative exploration of the factors influencing adoption</article-title>
          <source>Future Healthc J</source>
          <year>2021</year>
          <month>11</month>
          <day>02</day>
          <volume>8</volume>
          <issue>3</issue>
          <fpage>e648</fpage>
          <lpage>54</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2514-6645(24)00122-X"/>
          </comment>
          <pub-id pub-id-type="doi">10.7861/fhj.2020-0258</pub-id>
          <pub-id pub-id-type="medline">34888459</pub-id>
          <pub-id pub-id-type="pii">S2514-6645(24)00122-X</pub-id>
          <pub-id pub-id-type="pmcid">PMC8651325</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Laï</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Brian</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mamzer</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Perceptions of artificial intelligence in healthcare: findings from a qualitative survey study among actors in France</article-title>
          <source>J Transl Med</source>
          <year>2020</year>
          <month>01</month>
          <day>09</day>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>14</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://translational-medicine.biomedcentral.com/articles/10.1186/s12967-019-02204-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12967-019-02204-y</pub-id>
          <pub-id pub-id-type="medline">31918710</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12967-019-02204-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC6953249</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moorman</surname>
              <given-names>LP</given-names>
            </name>
          </person-group>
          <article-title>Principles for real-world implementation of bedside predictive analytics monitoring</article-title>
          <source>Appl Clin Inform</source>
          <year>2021</year>
          <month>08</month>
          <day>22</day>
          <volume>12</volume>
          <issue>4</issue>
          <fpage>888</fpage>
          <lpage>96</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.thieme-connect.com/DOI/DOI?10.1055/s-0041-1735183"/>
          </comment>
          <pub-id pub-id-type="doi">10.1055/s-0041-1735183</pub-id>
          <pub-id pub-id-type="medline">34553360</pub-id>
          <pub-id pub-id-type="pmcid">PMC8458037</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nadarzynski</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Miles</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Cowie</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ridge</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Acceptability of artificial intelligence (AI)-led chatbot services in healthcare: a mixed-methods study</article-title>
          <source>Digit Health</source>
          <year>2019</year>
          <volume>5</volume>
          <fpage>2055207619871808</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/abs/10.1177/2055207619871808?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub  0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/2055207619871808</pub-id>
          <pub-id pub-id-type="medline">31467682</pub-id>
          <pub-id pub-id-type="pii">10.1177_2055207619871808</pub-id>
          <pub-id pub-id-type="pmcid">PMC6704417</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Watson</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hutyra</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Clancy</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Chandiramani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bedoya</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ilangovan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Nderitu</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Poon</surname>
              <given-names>EG</given-names>
            </name>
          </person-group>
          <article-title>Overcoming barriers to the adoption and implementation of predictive modeling and machine learning in clinical care: what can we learn from US academic medical centers?</article-title>
          <source>JAMIA Open</source>
          <year>2020</year>
          <month>07</month>
          <volume>3</volume>
          <issue>2</issue>
          <fpage>167</fpage>
          <lpage>72</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32734155"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamiaopen/ooz046</pub-id>
          <pub-id pub-id-type="medline">32734155</pub-id>
          <pub-id pub-id-type="pii">ooz046</pub-id>
          <pub-id pub-id-type="pmcid">PMC7382631</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DS</given-names>
            </name>
            <name name-style="western">
              <surname>Jeon</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>RP</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Sim</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>PB</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Sakamoto</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Loewenstein</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lam</surname>
              <given-names>DS</given-names>
            </name>
            <name name-style="western">
              <surname>Pasquale</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>TY</given-names>
            </name>
            <name name-style="western">
              <surname>Lam</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DS</given-names>
            </name>
          </person-group>
          <article-title>Digital technology, tele-medicine and artificial intelligence in ophthalmology: a global perspective</article-title>
          <source>Prog Retin Eye Res</source>
          <year>2021</year>
          <month>05</month>
          <volume>82</volume>
          <fpage>100900</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32898686"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.preteyeres.2020.100900</pub-id>
          <pub-id pub-id-type="medline">32898686</pub-id>
          <pub-id pub-id-type="pii">S1350-9462(20)30072-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC7474840</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sangers</surname>
              <given-names>TE</given-names>
            </name>
            <name name-style="western">
              <surname>Wakkee</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kramer-Noels</surname>
              <given-names>EC</given-names>
            </name>
            <name name-style="western">
              <surname>Nijsten</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lugtenberg</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Views on mobile health apps for skin cancer screening in the general population: an in-depth qualitative exploration of perceived barriers and facilitators</article-title>
          <source>Br J Dermatol</source>
          <year>2021</year>
          <month>11</month>
          <day>05</day>
          <volume>185</volume>
          <issue>5</issue>
          <fpage>961</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33959945"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/bjd.20441</pub-id>
          <pub-id pub-id-type="medline">33959945</pub-id>
          <pub-id pub-id-type="pmcid">PMC9291092</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liyanage</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liaw</surname>
              <given-names>ST</given-names>
            </name>
            <name name-style="western">
              <surname>Jonnagaddala</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schreiber</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kuziemsky</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Terry</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>de Lusignan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in primary health care: perceptions, issues, and challenges</article-title>
          <source>Yearb Med Inform</source>
          <year>2019</year>
          <month>08</month>
          <day>25</day>
          <volume>28</volume>
          <issue>1</issue>
          <fpage>41</fpage>
          <lpage>6</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.thieme-connect.com/DOI/DOI?10.1055/s-0039-1677901"/>
          </comment>
          <pub-id pub-id-type="doi">10.1055/s-0039-1677901</pub-id>
          <pub-id pub-id-type="medline">31022751</pub-id>
          <pub-id pub-id-type="pmcid">PMC6697547</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gillner</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>We're implementing AI now, so why not ask us what to do? - how AI providers perceive and navigate the spread of diagnostic AI in complex healthcare systems</article-title>
          <source>Soc Sci Med</source>
          <year>2024</year>
          <month>01</month>
          <volume>340</volume>
          <fpage>116442</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://boris.unibe.ch/id/eprint/189658"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.socscimed.2023.116442</pub-id>
          <pub-id pub-id-type="medline">38029666</pub-id>
          <pub-id pub-id-type="pii">S0277-9536(23)00799-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Weinert</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Svensson</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Heinze</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Perspective of information technology decision makers on factors influencing adoption and implementation of artificial intelligence technologies in 40 German hospitals: descriptive analysis</article-title>
          <source>JMIR Med Inform</source>
          <year>2022</year>
          <month>06</month>
          <day>15</day>
          <volume>10</volume>
          <issue>6</issue>
          <fpage>e34678</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medinform.jmir.org/2022/6/e34678/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/34678</pub-id>
          <pub-id pub-id-type="medline">35704378</pub-id>
          <pub-id pub-id-type="pii">v10i6e34678</pub-id>
          <pub-id pub-id-type="pmcid">PMC9244653</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Panch</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Mattie</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Atun</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and algorithmic bias: implications for health systems</article-title>
          <source>J Glob Health</source>
          <year>2019</year>
          <month>12</month>
          <volume>9</volume>
          <issue>2</issue>
          <fpage>020318</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31788229"/>
          </comment>
          <pub-id pub-id-type="doi">10.7189/jogh.09.020318</pub-id>
          <pub-id pub-id-type="medline">31788229</pub-id>
          <pub-id pub-id-type="pii">jogh-09-020318</pub-id>
          <pub-id pub-id-type="pmcid">PMC6875681</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sunarti</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fadzlul Rahman</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Naufal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Risky</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Febriyanto</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Masnina</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in healthcare: opportunities and risk for future</article-title>
          <source>Gac Sanit</source>
          <year>2021</year>
          <volume>35 Suppl 1</volume>
          <fpage>S67</fpage>
          <lpage>70</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.elsevier.es/en/linksolver/ft/pii/S0213-9111(20)30278-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.gaceta.2020.12.019</pub-id>
          <pub-id pub-id-type="medline">33832631</pub-id>
          <pub-id pub-id-type="pii">S0213-9111(20)30278-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mlodzinski</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Wardi</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Viglione</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Nemati</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Crotty Alexander</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Malhotra</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Assessing barriers to implementation of machine learning and artificial intelligence-based tools in critical care: web-based survey study</article-title>
          <source>JMIR Perioper Med</source>
          <year>2023</year>
          <month>01</month>
          <day>27</day>
          <volume>6</volume>
          <fpage>e41056</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://periop.jmir.org/2023/1/e41056/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/41056</pub-id>
          <pub-id pub-id-type="medline">36705960</pub-id>
          <pub-id pub-id-type="pii">v6i1e41056</pub-id>
          <pub-id pub-id-type="pmcid">PMC10013679</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chua</surname>
              <given-names>IS</given-names>
            </name>
            <name name-style="western">
              <surname>Gaziel-Yablowitz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Korach</surname>
              <given-names>ZT</given-names>
            </name>
            <name name-style="western">
              <surname>Kehl</surname>
              <given-names>KL</given-names>
            </name>
            <name name-style="western">
              <surname>Levitan</surname>
              <given-names>NA</given-names>
            </name>
            <name name-style="western">
              <surname>Arriaga</surname>
              <given-names>YE</given-names>
            </name>
            <name name-style="western">
              <surname>Jackson</surname>
              <given-names>GP</given-names>
            </name>
            <name name-style="western">
              <surname>Bates</surname>
              <given-names>DW</given-names>
            </name>
            <name name-style="western">
              <surname>Hassett</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in oncology: path to implementation</article-title>
          <source>Cancer Med</source>
          <year>2021</year>
          <month>06</month>
          <day>07</day>
          <volume>10</volume>
          <issue>12</issue>
          <fpage>4138</fpage>
          <lpage>49</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33960708"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/cam4.3935</pub-id>
          <pub-id pub-id-type="medline">33960708</pub-id>
          <pub-id pub-id-type="pmcid">PMC8209596</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>TQ</given-names>
            </name>
            <name name-style="western">
              <surname>Medaglia</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Mapping the challenges of artificial intelligence in the public sector: evidence from public healthcare</article-title>
          <source>Gov Inf Q</source>
          <year>2019</year>
          <month>04</month>
          <volume>36</volume>
          <issue>2</issue>
          <fpage>368</fpage>
          <lpage>83</lpage>
          <pub-id pub-id-type="doi">10.1016/j.giq.2018.09.008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Scandiffio</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Younus</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jeyakumar</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Karsan</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Charow</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Salhia</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wiljer</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The adoption of AI in mental health care-perspectives from mental health professionals: qualitative descriptive study</article-title>
          <source>JMIR Form Res</source>
          <year>2023</year>
          <month>12</month>
          <day>07</day>
          <volume>7</volume>
          <fpage>e47847</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://formative.jmir.org/2023/1/e47847/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/47847</pub-id>
          <pub-id pub-id-type="medline">38060307</pub-id>
          <pub-id pub-id-type="pii">v7i1e47847</pub-id>
          <pub-id pub-id-type="pmcid">PMC10739240</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Psychosocial factors affecting artificial intelligence adoption in health care in China: cross-sectional study</article-title>
          <source>J Med Internet Res</source>
          <year>2019</year>
          <month>10</month>
          <day>17</day>
          <volume>21</volume>
          <issue>10</issue>
          <fpage>e14316</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2019/10/e14316/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/14316</pub-id>
          <pub-id pub-id-type="medline">31625950</pub-id>
          <pub-id pub-id-type="pii">v21i10e14316</pub-id>
          <pub-id pub-id-type="pmcid">PMC6913088</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xing</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Socio-technical barriers affecting large-scale deployment of AI-enabled wearable medical devices among the ageing population in China</article-title>
          <source>Technol Forecast Soc Change</source>
          <year>2021</year>
          <month>05</month>
          <volume>166</volume>
          <fpage>120609</fpage>
          <pub-id pub-id-type="doi">10.1016/j.techfore.2021.120609</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Petitgand</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Motulsky</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Denis</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Régis</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Investigating the barriers to physician adoption of an artificial intelligence-based decision support system in emergency care: an interpretative qualitative study</article-title>
          <source>Stud Health Technol Inform</source>
          <year>2020</year>
          <month>06</month>
          <day>16</day>
          <volume>270</volume>
          <fpage>1001</fpage>
          <lpage>5</lpage>
          <pub-id pub-id-type="doi">10.3233/SHTI200312</pub-id>
          <pub-id pub-id-type="medline">32570532</pub-id>
          <pub-id pub-id-type="pii">SHTI200312</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Asan</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Mansouri</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Perspectives of patients with chronic diseases on future acceptance of AI-based home care systems: cross-sectional web-based survey study</article-title>
          <source>JMIR Hum Factors</source>
          <year>2023</year>
          <month>11</month>
          <day>06</day>
          <volume>10</volume>
          <fpage>e49788</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://humanfactors.jmir.org/2023/1/e49788/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/49788</pub-id>
          <pub-id pub-id-type="medline">37930780</pub-id>
          <pub-id pub-id-type="pii">v10i1e49788</pub-id>
          <pub-id pub-id-type="pmcid">PMC10660233</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schepart</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Burton</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Durkin</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Fuller</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Charap</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Bhambri</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmad</surname>
              <given-names>FS</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence-enabled tools in cardiovascular medicine: a survey of current use, perceptions, and challenges</article-title>
          <source>Cardiovasc Digit Health J</source>
          <year>2023</year>
          <month>06</month>
          <volume>4</volume>
          <issue>3</issue>
          <fpage>101</fpage>
          <lpage>10</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2666-6936(23)00028-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cvdhj.2023.04.003</pub-id>
          <pub-id pub-id-type="medline">37351333</pub-id>
          <pub-id pub-id-type="pii">S2666-6936(23)00028-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC10282011</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Strohm</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hehakaya</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ranschaert</surname>
              <given-names>ER</given-names>
            </name>
            <name name-style="western">
              <surname>Boon</surname>
              <given-names>WP</given-names>
            </name>
            <name name-style="western">
              <surname>Moors</surname>
              <given-names>EH</given-names>
            </name>
          </person-group>
          <article-title>Implementation of artificial intelligence (AI) applications in radiology: hindering and facilitating factors</article-title>
          <source>Eur Radiol</source>
          <year>2020</year>
          <month>10</month>
          <day>26</day>
          <volume>30</volume>
          <issue>10</issue>
          <fpage>5525</fpage>
          <lpage>32</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32458173"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00330-020-06946-y</pub-id>
          <pub-id pub-id-type="medline">32458173</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00330-020-06946-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC7476917</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Helenason</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ekström</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Falk</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Papachristou</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Exploring the feasibility of an artificial intelligence based clinical decision support system for cutaneous melanoma detection in primary care - a mixed method study</article-title>
          <source>Scand J Prim Health Care</source>
          <year>2024</year>
          <month>03</month>
          <day>20</day>
          <volume>42</volume>
          <issue>1</issue>
          <fpage>51</fpage>
          <lpage>60</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37982736"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/02813432.2023.2283190</pub-id>
          <pub-id pub-id-type="medline">37982736</pub-id>
          <pub-id pub-id-type="pmcid">PMC10851794</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vijayakumar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>VV</given-names>
            </name>
            <name name-style="western">
              <surname>Leong</surname>
              <given-names>QY</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Blasiak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Physicians' perspectives on AI in clinical decision support systems: interview study of the CURATE.AI personalized dose optimization platform</article-title>
          <source>JMIR Hum Factors</source>
          <year>2023</year>
          <month>10</month>
          <day>30</day>
          <volume>10</volume>
          <fpage>e48476</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://humanfactors.jmir.org/2023/1/e48476/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48476</pub-id>
          <pub-id pub-id-type="medline">37902825</pub-id>
          <pub-id pub-id-type="pii">v10i1e48476</pub-id>
          <pub-id pub-id-type="pmcid">PMC10644191</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Temsah</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Aljamaan</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Malki</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Alhasan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Altamimi</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Aljarbou</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bazuhair</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Alsubaihin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Abdulmajeed</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Alshahrani</surname>
              <given-names>FS</given-names>
            </name>
            <name name-style="western">
              <surname>Temsah</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Alshahrani</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Eyadhy</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Alkhateeb</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Saddik</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Halwani</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Jamal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Tawfiq</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Eyadhy</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and the future of digital health: a study on healthcare workers' perceptions and expectations</article-title>
          <source>Healthcare (Basel)</source>
          <year>2023</year>
          <month>06</month>
          <day>21</day>
          <volume>11</volume>
          <issue>13</issue>
          <fpage>1812</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=healthcare11131812"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/healthcare11131812</pub-id>
          <pub-id pub-id-type="medline">37444647</pub-id>
          <pub-id pub-id-type="pii">healthcare11131812</pub-id>
          <pub-id pub-id-type="pmcid">PMC10340744</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Choudhury</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Factors influencing clinicians' willingness to use an AI-based clinical decision support system</article-title>
          <source>Front Digit Health</source>
          <year>2022</year>
          <month>08</month>
          <day>16</day>
          <volume>4</volume>
          <fpage>920662</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36339516"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fdgth.2022.920662</pub-id>
          <pub-id pub-id-type="medline">36339516</pub-id>
          <pub-id pub-id-type="pmcid">PMC9628998</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Victor Mugabe</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Barriers and facilitators to the adoption of artificial intelligence in radiation oncology: a New Zealand study</article-title>
          <source>Tech Innov Patient Support Radiat Oncol</source>
          <year>2021</year>
          <month>06</month>
          <volume>18</volume>
          <fpage>16</fpage>
          <lpage>21</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2405-6324(21)00018-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.tipsro.2021.03.004</pub-id>
          <pub-id pub-id-type="medline">33981867</pub-id>
          <pub-id pub-id-type="pii">S2405-6324(21)00018-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC8085695</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goswami</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Alam</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Deifalla</surname>
              <given-names>AF</given-names>
            </name>
            <name name-style="western">
              <surname>Ragab</surname>
              <given-names>AE</given-names>
            </name>
            <name name-style="western">
              <surname>Khargotra</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Exploring the antecedents of AI adoption for effective HRM practices in the Indian pharmaceutical sector</article-title>
          <source>Front Pharmacol</source>
          <year>2023</year>
          <month>11</month>
          <day>14</day>
          <volume>14</volume>
          <fpage>1215706</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38034991"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fphar.2023.1215706</pub-id>
          <pub-id pub-id-type="medline">38034991</pub-id>
          <pub-id pub-id-type="pii">1215706</pub-id>
          <pub-id pub-id-type="pmcid">PMC10682089</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Isbanner</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>O'Shaughnessy</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Steel</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wilcock</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The adoption of artificial intelligence in health care and social services in Australia: findings from a methodologically innovative national survey of values and attitudes (the AVA-AI study)</article-title>
          <source>J Med Internet Res</source>
          <year>2022</year>
          <month>08</month>
          <day>22</day>
          <volume>24</volume>
          <issue>8</issue>
          <fpage>e37611</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2022/8/e37611/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/37611</pub-id>
          <pub-id pub-id-type="medline">35994331</pub-id>
          <pub-id pub-id-type="pii">v24i8e37611</pub-id>
          <pub-id pub-id-type="pmcid">PMC9446139</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mahlknecht</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Engl</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Piccoliori</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wiedermann</surname>
              <given-names>CJ</given-names>
            </name>
          </person-group>
          <article-title>Supporting primary care through symptom checking artificial intelligence: a study of patient and physician attitudes in Italian general practice</article-title>
          <source>BMC Prim Care</source>
          <year>2023</year>
          <month>09</month>
          <day>04</day>
          <volume>24</volume>
          <issue>1</issue>
          <fpage>174</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37661285"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12875-023-02143-0</pub-id>
          <pub-id pub-id-type="medline">37661285</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12875-023-02143-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC10476397</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kleine</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Kokje</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lermer</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Gaube</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Attitudes toward the adoption of 2 artificial intelligence-enabled mental health tools among prospective psychotherapists: cross-sectional study</article-title>
          <source>JMIR Hum Factors</source>
          <year>2023</year>
          <month>07</month>
          <day>12</day>
          <volume>10</volume>
          <fpage>e46859</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://humanfactors.jmir.org/2023/1/e46859/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/46859</pub-id>
          <pub-id pub-id-type="medline">37436801</pub-id>
          <pub-id pub-id-type="pii">v10i1e46859</pub-id>
          <pub-id pub-id-type="pmcid">PMC10372564</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Choudhury</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shamszare</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Investigating the impact of user trust on the adoption and use of ChatGPT: survey analysis</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <month>06</month>
          <day>14</day>
          <volume>25</volume>
          <fpage>e47184</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023/1/e47184/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/47184</pub-id>
          <pub-id pub-id-type="medline">37314848</pub-id>
          <pub-id pub-id-type="pii">v25i1e47184</pub-id>
          <pub-id pub-id-type="pmcid">PMC10337387</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Stavropoulou</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Narasinkan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Scarbrough</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Professionals' responses to the introduction of AI innovations in radiology and their implications for future adoption: a qualitative study</article-title>
          <source>BMC Health Serv Res</source>
          <year>2021</year>
          <month>08</month>
          <day>14</day>
          <volume>21</volume>
          <issue>1</issue>
          <fpage>813</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmchealthservres.biomedcentral.com/articles/10.1186/s12913-021-06861-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12913-021-06861-y</pub-id>
          <pub-id pub-id-type="medline">34389014</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12913-021-06861-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC8364018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ongena</surname>
              <given-names>YP</given-names>
            </name>
            <name name-style="western">
              <surname>Haan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yakar</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kwee</surname>
              <given-names>TC</given-names>
            </name>
          </person-group>
          <article-title>Patients' views on the implementation of artificial intelligence in radiology: development and validation of a standardized questionnaire</article-title>
          <source>Eur Radiol</source>
          <year>2020</year>
          <month>02</month>
          <day>08</day>
          <volume>30</volume>
          <issue>2</issue>
          <fpage>1033</fpage>
          <lpage>40</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31705254"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00330-019-06486-0</pub-id>
          <pub-id pub-id-type="medline">31705254</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00330-019-06486-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC6957541</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alsobhi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sachdev</surname>
              <given-names>HS</given-names>
            </name>
            <name name-style="western">
              <surname>Chevidikunnan</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Basuodan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dhanesh Kumar</surname>
              <given-names>KU</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Facilitators and barriers of artificial intelligence applications in rehabilitation: a mixed-method approach</article-title>
          <source>Int J Environ Res Public Health</source>
          <year>2022</year>
          <month>11</month>
          <day>29</day>
          <volume>19</volume>
          <issue>23</issue>
          <fpage>15919</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=ijerph192315919"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/ijerph192315919</pub-id>
          <pub-id pub-id-type="medline">36497993</pub-id>
          <pub-id pub-id-type="pii">ijerph192315919</pub-id>
          <pub-id pub-id-type="pmcid">PMC9737928</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hickman</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Baxter</surname>
              <given-names>GC</given-names>
            </name>
            <name name-style="western">
              <surname>Gilbert</surname>
              <given-names>FJ</given-names>
            </name>
          </person-group>
          <article-title>Adoption of artificial intelligence in breast imaging: evaluation, ethical constraints and limitations</article-title>
          <source>Br J Cancer</source>
          <year>2021</year>
          <month>07</month>
          <day>26</day>
          <volume>125</volume>
          <issue>1</issue>
          <fpage>15</fpage>
          <lpage>22</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33772149"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41416-021-01333-w</pub-id>
          <pub-id pub-id-type="medline">33772149</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41416-021-01333-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC8257639</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pardalos</surname>
              <given-names>PM</given-names>
            </name>
          </person-group>
          <article-title>Investigating the impacting factors for the healthcare professionals to adopt artificial intelligence-based medical diagnosis support system (AIMDSS)</article-title>
          <source>Ann Oper Res</source>
          <year>2018</year>
          <month>03</month>
          <day>19</day>
          <volume>294</volume>
          <issue>1-2</issue>
          <fpage>567</fpage>
          <lpage>92</lpage>
          <pub-id pub-id-type="doi">10.1007/s10479-018-2818-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hemphill</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jackson</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Bradley</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bhartia</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>The implementation of artificial intelligence in radiology: a narrative review of patient perspectives</article-title>
          <source>Future Healthc J</source>
          <year>2023</year>
          <month>03</month>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>63</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2514-6645(24)00386-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.7861/fhj.2022-0097</pub-id>
          <pub-id pub-id-type="medline">37786489</pub-id>
          <pub-id pub-id-type="pii">S2514-6645(24)00386-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC10538685</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pou-Prom</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Murray</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kuzulugil</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mamdani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Verma</surname>
              <given-names>AA</given-names>
            </name>
          </person-group>
          <article-title>From compute to care: lessons learned from deploying an early warning system into clinical practice</article-title>
          <source>Front Digit Health</source>
          <year>2022</year>
          <month>9</month>
          <day>5</day>
          <volume>4</volume>
          <fpage>932123</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36133802"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fdgth.2022.932123</pub-id>
          <pub-id pub-id-type="medline">36133802</pub-id>
          <pub-id pub-id-type="pmcid">PMC9483018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goldstein</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Weitzman</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Lemerond</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Determinants for scalable adoption of autonomous AI in the detection of diabetic eye disease in diverse practice types: key best practices learned through collection of real-world data</article-title>
          <source>Front Digit Health</source>
          <year>2023</year>
          <month>5</month>
          <day>18</day>
          <volume>5</volume>
          <fpage>1004130</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37274764"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fdgth.2023.1004130</pub-id>
          <pub-id pub-id-type="medline">37274764</pub-id>
          <pub-id pub-id-type="pmcid">PMC10232822</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>TQ</given-names>
            </name>
          </person-group>
          <article-title>Adopting artificial intelligence in public healthcare: the effect of social power and learning algorithms</article-title>
          <source>Int J Environ Res Public Health</source>
          <year>2021</year>
          <month>12</month>
          <day>01</day>
          <volume>18</volume>
          <issue>23</issue>
          <fpage>12682</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=ijerph182312682"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/ijerph182312682</pub-id>
          <pub-id pub-id-type="medline">34886404</pub-id>
          <pub-id pub-id-type="pii">ijerph182312682</pub-id>
          <pub-id pub-id-type="pmcid">PMC8656642</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence healthcare service resources adoption by medical institutions based on TOE framework</article-title>
          <source>Digit Health</source>
          <year>2022</year>
          <month>10</month>
          <day>05</day>
          <volume>8</volume>
          <fpage>20552076221126034</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/20552076221126034?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/20552076221126034</pub-id>
          <pub-id pub-id-type="medline">36211801</pub-id>
          <pub-id pub-id-type="pii">10.1177_20552076221126034</pub-id>
          <pub-id pub-id-type="pmcid">PMC9537501</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wolff</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pauling</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Keck</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Baumbach</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Success factors of artificial intelligence implementation in healthcare</article-title>
          <source>Front Digit Health</source>
          <year>2021</year>
          <month>6</month>
          <day>16</day>
          <volume>3</volume>
          <fpage>594971</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34713083"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fdgth.2021.594971</pub-id>
          <pub-id pub-id-type="medline">34713083</pub-id>
          <pub-id pub-id-type="pmcid">PMC8521923</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Farič</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hinder</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Williams</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ramaesh</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bernabeu</surname>
              <given-names>MO</given-names>
            </name>
            <name name-style="western">
              <surname>van Beek</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Cresswell</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Early experiences of integrating an artificial intelligence-based diagnostic decision support system into radiology settings: a qualitative study</article-title>
          <source>J Am Med Inform Assoc</source>
          <year>2023</year>
          <month>12</month>
          <day>22</day>
          <volume>31</volume>
          <issue>1</issue>
          <fpage>24</fpage>
          <lpage>34</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37748456"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/jamia/ocad191</pub-id>
          <pub-id pub-id-type="medline">37748456</pub-id>
          <pub-id pub-id-type="pii">7281919</pub-id>
          <pub-id pub-id-type="pmcid">PMC10746311</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref80">
        <label>80</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bernier</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Knoppers</surname>
              <given-names>BM</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in cardiovascular imaging: "unexplainable" legal and ethical challenges?</article-title>
          <source>Can J Cardiol</source>
          <year>2022</year>
          <month>02</month>
          <volume>38</volume>
          <issue>2</issue>
          <fpage>225</fpage>
          <lpage>33</lpage>
          <pub-id pub-id-type="doi">10.1016/j.cjca.2021.10.009</pub-id>
          <pub-id pub-id-type="medline">34737036</pub-id>
          <pub-id pub-id-type="pii">S0828-282X(21)00810-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref81">
        <label>81</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Markus</surname>
              <given-names>AF</given-names>
            </name>
            <name name-style="western">
              <surname>Kors</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Rijnbeek</surname>
              <given-names>PR</given-names>
            </name>
          </person-group>
          <article-title>The role of explainability in creating trustworthy artificial intelligence for health care: a comprehensive survey of the terminology, design choices, and evaluation strategies</article-title>
          <source>J Biomed Inform</source>
          <year>2021</year>
          <month>01</month>
          <volume>113</volume>
          <fpage>103655</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1532-0464(20)30283-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2020.103655</pub-id>
          <pub-id pub-id-type="medline">33309898</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(20)30283-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref82">
        <label>82</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adadi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Berrada</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Peeking inside the black-box: a survey on explainable artificial intelligence (XAI)</article-title>
          <source>IEEE Access</source>
          <year>2018</year>
          <volume>6</volume>
          <fpage>52138</fpage>
          <lpage>60</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2018.2870052</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref83">
        <label>83</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Preece</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Asking ‘why’ in AI: explainability of intelligent systems – perspectives and challenges</article-title>
          <source>Intell Sys Acc Fin Mgmt</source>
          <year>2018</year>
          <month>04</month>
          <day>19</day>
          <volume>25</volume>
          <issue>2</issue>
          <fpage>63</fpage>
          <lpage>72</lpage>
          <pub-id pub-id-type="doi">10.1002/isaf.1422</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref84">
        <label>84</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Aelgani</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Vohra</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Bhagawati</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Paul</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Saba</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Suri</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Khanna</surname>
              <given-names>NN</given-names>
            </name>
            <name name-style="western">
              <surname>Laird</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Johri</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Kalra</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Fouda</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Fatemi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Naidu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Suri</surname>
              <given-names>JS</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence bias in medical system designs: a systematic review</article-title>
          <source>Multimed Tools Appl</source>
          <year>2023</year>
          <month>07</month>
          <day>22</day>
          <volume>83</volume>
          <issue>6</issue>
          <fpage>18005</fpage>
          <lpage>57</lpage>
          <pub-id pub-id-type="doi">10.1007/s11042-023-16029-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref85">
        <label>85</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Buolamwini</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gebru</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Gender shades: intersectional accuracy disparities in commercial gender classification</article-title>
          <source>Proceedings of the 2018 International Conference on Machine Learning and Machine Intelligence</source>
          <year>2018</year>
          <conf-name>MLMI '18</conf-name>
          <conf-date>September 28-30, 2018</conf-date>
          <conf-loc>Ha Noi, VietNam</conf-loc>
          <fpage>1</fpage>
          <lpage>15</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref86">
        <label>86</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>White</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Andreae</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Lui</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Tangel</surname>
              <given-names>VE</given-names>
            </name>
            <name name-style="western">
              <surname>Turnbull</surname>
              <given-names>ZA</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>SY</given-names>
            </name>
            <name name-style="western">
              <surname>Nachamie</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Pryor</surname>
              <given-names>KO</given-names>
            </name>
            <collab>Multicenter Perioperative Outcomes Group Collaborators</collab>
          </person-group>
          <article-title>Antiemetic administration and its association with race: a retrospective cohort study</article-title>
          <source>Anesthesiology</source>
          <year>2023</year>
          <month>06</month>
          <day>01</day>
          <volume>138</volume>
          <issue>6</issue>
          <fpage>587</fpage>
          <lpage>601</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pubs.asahq.org/anesthesiology/article-lookup/doi/10.1097/ALN.0000000000004549"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/ALN.0000000000004549</pub-id>
          <pub-id pub-id-type="medline">37158649</pub-id>
          <pub-id pub-id-type="pii">137851</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref87">
        <label>87</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Seyyed-Kalantari</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>McDermott</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>IY</given-names>
            </name>
            <name name-style="western">
              <surname>Ghassemi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>CheXclusion: fairness gaps in deep chest X-ray classifiers</article-title>
          <source>Proceedings of the 2021 Pacific Symposium</source>
          <year>2021</year>
          <conf-name>Biocomputing '21</conf-name>
          <conf-date>January 3-7, 2021</conf-date>
          <conf-loc>Kohala, HI</conf-loc>
          <fpage>232</fpage>
          <lpage>243</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.worldscientific.com/doi/abs/10.1142/9789811232701_0022"/>
          </comment>
          <pub-id pub-id-type="doi">10.1142/9789811232701_0022</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref88">
        <label>88</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lam</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>AR</given-names>
            </name>
            <name name-style="western">
              <surname>Bryois</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Gaspar</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ikeda</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Benyamin</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>BC</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Guan</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kamatani</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kubo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kusumawardhani</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Periyasamy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Takahashi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>F</given-names>
            </name>
            <collab>Schizophrenia Working Group of the Psychiatric Genomics Consortium</collab>
            <collab>Indonesia Schizophrenia Consortium</collab>
            <collab>Genetic REsearch on schizophreniA neTwork-China and the Netherlands (GREAT-CN)</collab>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>WJ</given-names>
            </name>
            <name name-style="western">
              <surname>Faraone</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Glatt</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hyman</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Hwu</surname>
              <given-names>HG</given-names>
            </name>
            <name name-style="western">
              <surname>McCarroll</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Neale</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Sklar</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wildenauer</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Mowry</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Holmans</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sullivan</surname>
              <given-names>PF</given-names>
            </name>
            <name name-style="western">
              <surname>Ripke</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>O'Donovan</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Daly</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Qin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sham</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Iwata</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Schwab</surname>
              <given-names>SG</given-names>
            </name>
            <name name-style="western">
              <surname>Yue</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Tsuang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Kahn</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Comparative genetic architectures of schizophrenia in East Asian and European populations</article-title>
          <source>Nat Genet</source>
          <year>2019</year>
          <month>12</month>
          <day>18</day>
          <volume>51</volume>
          <issue>12</issue>
          <fpage>1670</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31740837"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41588-019-0512-x</pub-id>
          <pub-id pub-id-type="medline">31740837</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41588-019-0512-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC6885121</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref89">
        <label>89</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Williamson</surname>
              <given-names>DF</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>TY</given-names>
            </name>
            <name name-style="western">
              <surname>Lipkova</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>MY</given-names>
            </name>
            <name name-style="western">
              <surname>Sahai</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mahmood</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Algorithmic fairness in artificial intelligence for medicine and healthcare</article-title>
          <source>Nat Biomed Eng</source>
          <year>2023</year>
          <month>06</month>
          <day>28</day>
          <volume>7</volume>
          <issue>6</issue>
          <fpage>719</fpage>
          <lpage>42</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37380750"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41551-023-01056-8</pub-id>
          <pub-id pub-id-type="medline">37380750</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41551-023-01056-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC10632090</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref90">
        <label>90</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>O'Reilly-Shah</surname>
              <given-names>VN</given-names>
            </name>
            <name name-style="western">
              <surname>Gentry</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Walters</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Zivot</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>CT</given-names>
            </name>
            <name name-style="western">
              <surname>Tighe</surname>
              <given-names>PJ</given-names>
            </name>
          </person-group>
          <article-title>Bias and ethical considerations in machine learning and the automation of perioperative risk assessment</article-title>
          <source>Br J Anaesth</source>
          <year>2020</year>
          <month>12</month>
          <volume>125</volume>
          <issue>6</issue>
          <fpage>843</fpage>
          <lpage>6</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0007-0912(20)30631-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.bja.2020.07.040</pub-id>
          <pub-id pub-id-type="medline">32838979</pub-id>
          <pub-id pub-id-type="pii">S0007-0912(20)30631-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC7442146</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref91">
        <label>91</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Landers</surname>
              <given-names>RN</given-names>
            </name>
            <name name-style="western">
              <surname>Behrend</surname>
              <given-names>TS</given-names>
            </name>
          </person-group>
          <article-title>Auditing the AI auditors: a framework for evaluating fairness and bias in high stakes AI predictive models</article-title>
          <source>Am Psychol</source>
          <year>2023</year>
          <month>01</month>
          <volume>78</volume>
          <issue>1</issue>
          <fpage>36</fpage>
          <lpage>49</lpage>
          <pub-id pub-id-type="doi">10.1037/amp0000972</pub-id>
          <pub-id pub-id-type="medline">35157476</pub-id>
          <pub-id pub-id-type="pii">2022-30899-001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref92">
        <label>92</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gerke</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Minssen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>IG</given-names>
            </name>
          </person-group>
          <article-title>Ethical and legal challenges of artificial intelligence-driven health care</article-title>
          <source>SSRN Journal. Preprint posted online May 26, 2020</source>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3570129"/>
          </comment>
          <pub-id pub-id-type="doi">10.2139/ssrn.3570129</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref93">
        <label>93</label>
        <nlm-citation citation-type="web">
          <article-title>Google DeepMind NHS app test broke UK privacy law</article-title>
          <source>BBC News</source>
          <year>2017</year>
          <access-date>2024-04-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.bbc.com/news/technology-40483202">https://www.bbc.com/news/technology-40483202</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref94">
        <label>94</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Ethics of AI and health care: towards a substantive human rights framework</article-title>
          <source>Topoi</source>
          <year>2023</year>
          <month>04</month>
          <day>12</day>
          <volume>42</volume>
          <issue>3</issue>
          <fpage>857</fpage>
          <lpage>66</lpage>
          <pub-id pub-id-type="doi">10.1007/s11245-023-09911-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref95">
        <label>95</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Obermeyer</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Powers</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Vogeli</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Mullainathan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title>
          <source>Science</source>
          <year>2019</year>
          <month>10</month>
          <day>25</day>
          <volume>366</volume>
          <issue>6464</issue>
          <fpage>447</fpage>
          <lpage>53</lpage>
          <pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id>
          <pub-id pub-id-type="medline">31649194</pub-id>
          <pub-id pub-id-type="pii">366/6464/447</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref96">
        <label>96</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Solanki</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Grundy</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hussain</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Operationalising ethics in artificial intelligence for healthcare: a framework for AI developers</article-title>
          <source>AI Ethics</source>
          <year>2022</year>
          <month>07</month>
          <day>19</day>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>223</fpage>
          <lpage>40</lpage>
          <pub-id pub-id-type="doi">10.1007/s43681-022-00195-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref97">
        <label>97</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>WA</given-names>
            </name>
            <name name-style="western">
              <surname>Draper</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of artificial intelligence clinical applications: detailed case analyses show value of healthcare ethics approach in identifying patient care issues</article-title>
          <source>Bioethics</source>
          <year>2021</year>
          <month>09</month>
          <day>28</day>
          <volume>35</volume>
          <issue>7</issue>
          <fpage>623</fpage>
          <lpage>33</lpage>
          <pub-id pub-id-type="doi">10.1111/bioe.12885</pub-id>
          <pub-id pub-id-type="medline">34046918</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref98">
        <label>98</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goirand</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Austin</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Clay-Williams</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Implementing ethics in healthcare AI-based applications: a scoping review</article-title>
          <source>Sci Eng Ethics</source>
          <year>2021</year>
          <month>09</month>
          <day>03</day>
          <volume>27</volume>
          <issue>5</issue>
          <fpage>61</fpage>
          <pub-id pub-id-type="doi">10.1007/s11948-021-00336-3</pub-id>
          <pub-id pub-id-type="medline">34480239</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11948-021-00336-3</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
