<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Hum Factors</journal-id><journal-id journal-id-type="publisher-id">humanfactors</journal-id><journal-id journal-id-type="index">6</journal-id><journal-title>JMIR Human Factors</journal-title><abbrev-journal-title>JMIR Hum Factors</abbrev-journal-title><issn pub-type="epub">2292-9495</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e71065</article-id><article-id pub-id-type="doi">10.2196/71065</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Practitioner Perspectives on the Uses of Generative AI Chatbots in Mental Health Care: Mixed Methods Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Goldie</surname><given-names>Jessie</given-names></name><degrees>LLB, BA, GDipPsych</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Dennis</surname><given-names>Simon</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Hipgrave</surname><given-names>Lyndsey</given-names></name><degrees>BSocSci, BSW(Hons)</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Coleman</surname><given-names>Amanda</given-names></name><degrees>BS, 
GradDipProPsych</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>Melbourne School of Psychological Sciences, Medicine, Dentistry and Health Sciences, University of Melbourne</institution><addr-line>Grattan Street, Parkville</addr-line><addr-line>Melbourne</addr-line><country>Australia</country></aff><aff id="aff2"><institution>Monash School of Psychological Sciences, Faculty of Medicine, Nursing and Health Sciences, Monash University</institution><addr-line>Melbourne</addr-line><country>Australia</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Choudhury</surname><given-names>Avishek</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Dal</surname><given-names>Emre</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Zhang</surname><given-names>Lei</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Elbattah</surname><given-names>Mahmoud</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Simon Dennis, PhD, Melbourne School of Psychological Sciences, Medicine, Dentistry and Health Sciences, University of Melbourne, Grattan Street, Parkville, Melbourne, 3010, Australia, 61 467607835; <email>simon.dennis@unimelb.edu.au</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>16</day><month>9</month><year>2025</year></pub-date><volume>12</volume><elocation-id>e71065</elocation-id><history><date date-type="received"><day>09</day><month>01</month><year>2025</year></date><date date-type="rev-recd"><day>07</day><month>08</month><year>2025</year></date><date 
date-type="accepted"><day>10</day><month>08</month><year>2025</year></date></history><copyright-statement>&#x00A9;Jessie Goldie, Simon Dennis, Lyndsey Hipgrave, Amanda Coleman. Originally published in JMIR Human Factors (<ext-link ext-link-type="uri" xlink:href="https://humanfactors.jmir.org">https://humanfactors.jmir.org</ext-link>), 16.9.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Human Factors, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://humanfactors.jmir.org">https://humanfactors.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://humanfactors.jmir.org/2025/1/e71065"/><abstract><sec><title>Background</title><p>Generative artificial intelligence (AI) chatbots have the potential to improve mental health care for practitioners and clients. Evidence demonstrates that AI chatbots can assist with tasks such as documentation, research, counseling, and therapeutic exercises. 
However, research examining practitioners&#x2019; perspectives is limited.</p></sec><sec><title>Objective</title><p>This mixed-methods study investigates: (1) practitioners&#x2019; perspectives on different uses of generative AI chatbots; (2) their likelihood of recommending chatbots to clients; and (3) whether recommendation likelihood increases after viewing a demonstration.</p></sec><sec sec-type="methods"><title>Methods</title><p>Participants were 23 mental health practitioners, including 17 females and 6 males, with a mean age of 39.39 (SD 16.20) years. In 45-minute interviews, participants selected their 3 most helpful uses of chatbots from 11 options and rated their likelihood of recommending chatbots to clients on a Likert scale before and after an 11-minute chatbot demonstration.</p></sec><sec sec-type="results"><title>Results</title><p>Binomial tests found that Generating case notes was selected at greater-than-chance levels (15/23, 65%; <italic>P</italic>=.001), while Support with session planning (<italic>P</italic>=.86) and Identifying and suggesting literature (<italic>P</italic>=.10) were not. Although 55% (12/22) were likely to recommend chatbots to clients, a binomial test found no significant difference from the 50% threshold (<italic>P</italic>=.74). A paired samples <italic>t</italic> test found that recommendation likelihood increased significantly (19/23, 83%; <italic>P</italic>=.002) from predemonstration to postdemonstration.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Findings suggest practitioners favor administrative uses of generative AI and are more likely to recommend chatbots to clients after exposure. 
This study highlights a need for practitioner education and guidelines to support safe and effective AI integration in mental health care.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>ChatGPT</kwd><kwd>mental health care</kwd><kwd>practitioner perspectives</kwd><kwd>mixed methods</kwd><kwd>digital health</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Overview</title><p>Globally, the prevalence of mental illness is rising and health care systems are under increasing pressure [<xref ref-type="bibr" rid="ref1">1</xref>]. In Australia, individuals face barriers to mental health care, including financial costs and extended wait times for appointments, while practitioners are struggling to meet demand [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. In this context, artificial intelligence (AI) is a potential game changer, offering 24/7 support for individuals, thus alleviating pressure on mental health practitioners [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. Current models engage users in therapeutic conversations, with many using &#x201C;rule-based&#x201D; scripted responses and predefined dialog structures [<xref ref-type="bibr" rid="ref6">6</xref>]. This technology differs from generative AI chatbots, such as ChatGPT (OpenAI), which generate original content and provide lifelike interactions that closely imitate humans.</p><p>Generative AI can offer more empathetic emotional support in comparison to rule-based chatbots [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref9">9</xref>], making it more effective in counseling conversations and more acceptable to clients. 
A recent qualitative study exploring the efficacy of ChatGPT in supporting 24 individuals with mental health conditions found that more than 50% of participants found ChatGPT to be empathetic and 80% said the tool helped to manage symptoms [<xref ref-type="bibr" rid="ref10">10</xref>]. However, participants also raised concerns regarding data privacy and the risk of personal data theft, uncertainty in the reliability of the information shared and the quality of training data, and a lack of understanding of cultural determinants of mental health.</p><p>Generative AI chatbots may also provide improved access to mental health information and psychoeducation. Preprint and peer-reviewed studies assessing the quality of psychoeducational information generated by ChatGPT on depression, anxiety, psychosis, and substance abuse found that it was accurate, clear, clinically useful, relevant, and empathetic [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref13">13</xref>]. Furthermore, studies have found generative AI chatbots to be effective in supporting adherence to psychological treatments [<xref ref-type="bibr" rid="ref14">14</xref>] and delivering positive psychology interventions [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. These studies indicate that generative AI chatbots may improve individuals&#x2019; access to information and basic therapeutic exercises, allowing clinicians to focus on more complex tasks, such as diagnosis and treatment.</p><p>Studies have identified a variety of uses of AI for practitioners, including research, documentation, administering psychological assessments, clinic management, and triaging [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref19">19</xref>]. 
Generative AI could enhance these functions by offering faster processing speeds, more comprehensive client histories, and higher empathy [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. However, like rule-based AI models, studies have found generative AI chatbots are prone to mistakes, with one study finding that ChatGPT included incorrect information in 36% of client documents in a medical setting [<xref ref-type="bibr" rid="ref21">21</xref>]. Another study evaluated ChatGPT&#x2019;s performance in conducting mental health assessments and found that, while responses for simple cases were acceptable, recommendations for more complex cases were inappropriate and potentially harmful [<xref ref-type="bibr" rid="ref22">22</xref>]. These findings highlight the risks associated with AI, including hallucinations and misinterpretations.</p><p>Generative AI may also be capable of more sophisticated functions, such as recommending diagnoses and treatment options. Bartal et al [<xref ref-type="bibr" rid="ref23">23</xref>] found that ChatGPT was able to accurately identify posttraumatic stress disorder (PTSD) following childbirth by analyzing individuals&#x2019; birth stories, achieving high sensitivity (85%) and specificity (75%), aligning with commonly accepted benchmarks of reliability [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. Similarly, another case study demonstrated that ChatGPT can identify treatment-resistant schizophrenia and suggest appropriate treatment options [<xref ref-type="bibr" rid="ref26">26</xref>]. 
Despite these innovative applications, some argue that there is a need for significant improvement before AI is adopted in mental health care [<xref ref-type="bibr" rid="ref27">27</xref>].</p><p>Clinicians appear cautious about the use of AI and feel more education and training are required [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. In a systematic review of AI in mental health care, Rogan et al [<xref ref-type="bibr" rid="ref28">28</xref>] found that most studies highlighted a need for clinician training, emphasizing that &#x201C;AI will only be useful if clinicians understand and feel comfortable using it&#x201D;. Similarly, in a survey of 138 American psychiatrists, Blease et al [<xref ref-type="bibr" rid="ref29">29</xref>] found that 90% of participants agreed that clinicians need more support to understand generative AI. Qualitative responses were mixed, with some participants expressing uncertainty and concerns relating to potential harm and others overestimating the readiness of generative AI to deliver clinical tasks.</p><p>While practitioners have expressed a need for more training and support, many may already be using AI tools for internal purposes. Blease et al [<xref ref-type="bibr" rid="ref29">29</xref>] found that more than half of psychiatrists surveyed had used generative AI chatbots to answer clinical questions. A survey of 86 Australian mental health professionals found that 43% had used AI and that ChatGPT was the most commonly used tool [<xref ref-type="bibr" rid="ref19">19</xref>]. Likewise, a systematic review preprint on the use of generative AI in health care found that the technology is &#x201C;heavily applied&#x201D; for documentation, research, and administration among medical practitioners [<xref ref-type="bibr" rid="ref30">30</xref>].</p><p>Research examining practitioners&#x2019; use of generative AI chatbots with clients is more limited. 
One study found strong support among clinicians to use chatbots with clients, with 80% of participants reporting they would be either very likely (24%) or somewhat likely (56%) to recommend AI mental health chatbots to clients within the next 5 years [<xref ref-type="bibr" rid="ref31">31</xref>]. Participants may have been more supportive as they were asked to consider their future likelihood and, therefore, the potential for evolution of both the technology and their understanding.</p></sec><sec id="s1-2"><title>Objective</title><p>This study aims to explore which functions of generative AI chatbots mental health practitioners think are most useful, and to what extent they would recommend them to clients. In addition, this study will investigate the impact of exposure to a generative AI chatbot on participants&#x2019; likelihood of recommending chatbots to clients.</p><p>This study uses both quantitative and qualitative data to address 3 research questions (RQ) aligned with corresponding hypotheses. These RQs address key gaps in the literature, including limited insight into which functions practitioners prefer, a lack of empirical data on their willingness to recommend generative AI chatbots to clients, and minimal understanding of how exposure to the technology might influence those attitudes.</p><p>RQ1 explores practitioners&#x2019; views on the most useful functions of generative AI chatbots in mental health care. It is hypothesized that functions such as generating case notes (hypothesis 1), supporting session planning (hypothesis 2), and identifying literature (hypothesis 3) will be prioritized. Previous research suggests clinicians favor using AI for internal tasks like documentation and research [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>].</p><p>RQ2 examines the likelihood of mental health practitioners recommending a generative AI chatbot to clients. 
It is hypothesized (hypothesis 4) that fewer than 50% will do so, as practitioners typically have low technology literacy [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref32">32</xref>].</p><p>RQ3 investigates whether practitioners are more likely to recommend an AI chatbot after a demonstration, with the prediction (hypothesis 5) that exposure to a demo will significantly increase their likelihood to recommend it. Research indicates that familiarity with health care technologies improves clinician adoption [<xref ref-type="bibr" rid="ref33">33</xref>].</p></sec></sec><sec id="s2" sec-type="methods"><title>Method</title><sec id="s2-1"><title>Participants</title><p>Participants were recruited using a purposive, convenience sampling method. Digital flyers were distributed on forums including the Australian Psychological Society (APS), Australian Association of Psychologists Inc (AAPi), through Melbourne University channels, and with friends and family. Individuals who were not working in a client-facing, mental health role were excluded from the study.</p><p>In total, 23 individuals participated in the study, and all were working in Australia. 
Sample demographics are presented in <xref ref-type="table" rid="table1">Table 1</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Sample demographic information (N=23).</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Demographic variables</td><td align="left" valign="bottom">Participants (N=23)</td></tr></thead><tbody><tr><td align="left" valign="top">Gender, n (%)</td><td align="left" valign="top">&#x2003;</td></tr><tr><td align="left" valign="top">&#x2003;Men</td><td align="left" valign="top">6 (26)</td></tr><tr><td align="left" valign="top">&#x2003;Women</td><td align="left" valign="top">17 (74)</td></tr><tr><td align="left" valign="top">Age (years), mean (SD)</td><td align="left" valign="top">39.39 (16.20)</td></tr><tr><td align="left" valign="top">Clinical role, n (%)</td><td align="left" valign="top">&#x2003;</td></tr><tr><td align="left" valign="top">&#x2003;Psychologist (including provisional)</td><td align="left" valign="top">10 (43)</td></tr><tr><td align="left" valign="top">&#x2003;Counselor or psychotherapist</td><td align="left" valign="top">6 (26)</td></tr><tr><td align="left" valign="top">&#x2003;Mental health social worker</td><td align="left" valign="top">4 (17)</td></tr><tr><td align="left" valign="top">&#x2003;Psychiatrist</td><td align="left" valign="top">2 (9)</td></tr><tr><td align="left" valign="top">&#x2003;Mental health support worker</td><td align="left" valign="top">1 (4)</td></tr><tr><td align="left" valign="top">Ethnicity, n (%)</td><td align="left" valign="top">&#x2003;</td></tr><tr><td align="left" valign="top">&#x2003;Caucasian or European</td><td align="left" valign="top">15 (65)</td></tr><tr><td align="left" valign="top">&#x2003;Asian</td><td align="left" valign="top">5 (22)</td></tr><tr><td align="left" valign="top">&#x2003;Mixed ethnicity</td><td align="left" valign="top">3 (13)</td></tr><tr><td align="left" valign="top">Area of work, n 
(%)</td><td align="left" valign="top">&#x2003;</td></tr><tr><td align="left" valign="top">&#x2003;Private sector</td><td align="left" valign="top">8 (35)</td></tr><tr><td align="left" valign="top">&#x2003;Public sector</td><td align="left" valign="top">9 (39)</td></tr><tr><td align="left" valign="top">&#x2003;Mixed</td><td align="left" valign="top">6 (26)</td></tr><tr><td align="left" valign="top">Years of work experience, mean (SD)</td><td align="left" valign="top">10.78 (14.31)</td></tr></tbody></table></table-wrap></sec><sec id="s2-2"><title>Procedure</title><p>The study team devised interview questions (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) to explore practitioners&#x2019; perspectives on generative AI chatbots in mental health care. The interviews were 45 minutes long and were conducted over Zoom (Zoom Video Communications, Inc.) between May and August 2024. Open-ended, Likert-scale, and ranking questions were used to collect both qualitative and quantitative data. Interviews were recorded and transcribed using AI software, Notta (Notta Inc), to support qualitative analysis.</p></sec><sec id="s2-3"><title>Uses of Generative AI Chatbots in Mental Health Care</title><p>Participants were asked to select their top 3 &#x201C;most useful&#x201D; functions of generative AI chatbots from a list of 11 options and explain their reasoning. 
The uses were:</p><list list-type="order"><list-item><p>Generating case notes</p></list-item><list-item><p>Support with session planning</p></list-item><list-item><p>Client triaging</p></list-item><list-item><p>Client onboarding processes</p></list-item><list-item><p>Administering psychological assessments</p></list-item><list-item><p>Supporting therapist-directed exercises</p></list-item><list-item><p>Supporting self-directed exercises</p></list-item><list-item><p>Symptom tracking and monitoring</p></list-item><list-item><p>Identifying and suggesting literature relevant to a client&#x2019;s profile</p></list-item><list-item><p>Psychoeducation and socialization to therapeutic models</p></list-item><list-item><p>Counseling</p></list-item></list><p>An option to select &#x201C;Other&#x201D; was offered and subsequently removed from the analysis as it was not selected by any participants.</p></sec><sec id="s2-4"><title>Likelihood of Recommending a Chatbot to a Client</title><p>Participants were asked, &#x201C;How likely are you to recommend a chatbot to a client?&#x201D; on a Likert scale of 1 (Highly unlikely) to 4 (Highly likely). This question was asked both before (T1) and after (T2) viewing a demonstration of a generative AI chatbot.</p><p>The demonstration was an 11-minute screen recording of a generative AI chatbot that had been built using GPT-4o (OpenAI) for the purposes of this research. During the demonstration, a fictional client (&#x201C;Saman&#x201D;) interacted with the chatbot to illustrate a range of use cases, from triaging, psychological assessment, clinic onboarding, counseling, and completing a gratitude exercise. 
The demonstration also showed Saman&#x2019;s fictional therapist using the chatbot to generate case notes, identify literature relevant to Saman&#x2019;s profile, and plan future sessions (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>).</p><p>Although the demonstration featured a chatbot built using GPT-4o, participants were asked to rate and reflect on generative AI chatbots for mental health support more generally, rather than the specific tool presented. Only the text-based conversational capacity of GPT-4o was used, with safety filters (mechanisms that monitor and restrict the model&#x2019;s output to reduce risk of harm), and real-time context (allowing the chatbot to access external data and information) active by default. These features were not explicitly evaluated but were considered reflective of current generative AI chatbot capabilities.</p></sec><sec id="s2-5"><title>Data Analysis</title><sec id="s2-5-1"><title>Quantitative</title><sec id="s2-5-1-1"><title>RQ1: Uses of Generative AI Chatbots</title><p>Data were analyzed to determine the proportion of participants who selected each use and plot the SE. To test statistical hypotheses (hypothesis 1, hypothesis 2, and hypothesis 3), 3 binomial tests were conducted to assess whether specific uses were selected at greater than chance levels.</p></sec><sec id="s2-5-1-2"><title>RQ2: Likelihood of Recommending a Chatbot to a Client</title><p>A binomial test was conducted to test hypothesis 4 and analyze whether less than 50% of participants would be likely to recommend a chatbot. 
A Bayesian binomial test was conducted to evaluate the strength of the evidence in favor of the alternative hypothesis.</p></sec><sec id="s2-5-1-3"><title>RQ3: Increase in Likelihood of Recommending a Chatbot After a Demonstration</title><p>To test hypothesis 5, a paired-samples <italic>t</italic> test was conducted to measure whether there was a significant difference in the mean likelihood of recommending a chatbot before (T1) and after (T2) the demonstration. A Bayesian paired samples <italic>t</italic> test assessed the strength of the evidence in favor of the alternative hypothesis (a significant increase in recommendation likelihood from T1 to T2).</p></sec></sec></sec><sec id="s2-6"><title>Qualitative</title><p>A thematic analysis was conducted following Braun and Clarke&#x2019;s [<xref ref-type="bibr" rid="ref34">34</xref>] 6-phase framework. Analysis began with familiarization, where transcripts were reviewed to identify initial ideas. Themes were then generated based on recurring concepts, iterated, and refined to ensure they captured core insights. Finally, qualitative themes were considered alongside the quantitative results to identify key research findings. Where participants are quoted, a reference is provided with their participant identification number (#), gender (men or women), and age.</p></sec><sec id="s2-7"><title>Ethical Considerations</title><p>All participants provided informed consent and no compensation was offered. Participants were informed in advance that participation was voluntary, and they could withdraw at any time without consequence. Data were stored securely, and participant confidentiality was secured by anonymizing all data. 
Ethics approval was obtained from the University of Melbourne Human Research Ethics Committee (#28479).</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview of Qualitative Themes</title><p>Thematic analysis produced 9 qualitative themes (QTs), each aligned to a particular RQ, as shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>. These themes are reported in further detail throughout the following section.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Summary of qualitative themes.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="humanfactors_v12i1e71065_fig01.png"/></fig></sec><sec id="s3-2"><title>RQ1: Uses of Generative AI Chatbots</title><p>Proportions of participants who selected each use in their top 3 are presented in <xref ref-type="fig" rid="figure2">Figure 2</xref>.</p><p>A binomial test was conducted to assess whether Generating case notes was selected in participants&#x2019; top 3 choices at greater than chance levels. The chance probability was calculated at 0.25, based on the probability of a participant selecting an option, acknowledging that they had 3 choices and could not select the same option twice (<italic>P</italic>(selected)=1 - [1- 1/11] x [1 - 1/10] x [1- 1/9]). The results were significant, <italic>P</italic>=.001, indicating that Generating case notes was selected at greater than chance levels. A binomial test indicated that Support with session planning was not selected in participants&#x2019; top 3 choices at greater than chance levels (0.25; <italic>P</italic>=.86). 
A binomial test indicated that Identifying and suggesting literature was not selected in participants&#x2019; top 3 choices at greater than chance levels (0.25; <italic>P</italic>=.10).</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Proportion of participants selecting each use of generative AI chatbots. Error bars represent the SE of the proportion of participants selecting each use.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="humanfactors_v12i1e71065_fig02.png"/></fig><sec id="s3-2-1"><title>QT1: Potential for Administrative Efficiencies</title><p>Qualitative analysis indicated that participants value the potential of generative AI chatbots to reduce time spent writing case notes. One participant explained:</p><disp-quote><p>Writing clinical notes, that takes up probably more time in therapists&#x2019; days than they would like... having a process to be able to... dump your very verbatim notes from a session and have it spit out a very succinct summary of the mental state formulation. That would be awesome</p><attrib>#20, F, 29.</attrib></disp-quote><p>Another participant emphasized that using a chatbot to document case notes would enable them to spend more time with clients,</p><disp-quote><p>Generating case notes strikes me as just a time-consuming and aversive activity&#x2026; you save yourself time to spend more time with clients and less time on admin</p><attrib>#4, M, 42.</attrib></disp-quote></sec><sec id="s3-2-2"><title>QT2: Chatbots Useful as a Thought Partner</title><p>Qualitative responses indicated that participants saw the potential of generative AI chatbots to assist with session planning by providing new ideas, though it wasn&#x2019;t seen as a necessity. 
One participant stated,</p><disp-quote><p>For help with session planning I feel like well it&#x2019;s good with just reflecting on ideas and you know talk about things that you maybe haven&#x2019;t thought about</p><attrib>#2, F, 27.</attrib></disp-quote><p>Another explained,</p><disp-quote><p>I mean, you could do it without it, but it&#x2019;s always helpful to just to maybe get another perspective or consider something else</p><attrib>#17, F, 35.</attrib></disp-quote></sec><sec id="s3-2-3"><title>QT3: Importance of Human Oversight</title><p>While qualitative responses indicated that participants hoped generative AI chatbots might enhance their research, many stressed the importance of maintaining human oversight. One participant explained:</p><disp-quote><p>[Identifying literature] is something that we do digitally anyway and if we&#x2019;re able to customize a search to a client specific profile, that might make the search better. And then obviously, the clinician can still have the final say on that, but it would be helpful</p><attrib>#9, F, 24.</attrib></disp-quote><p>Several participants raised concerns around accuracy, with one participant remarking that,</p><disp-quote><p>It would be great to have a reliable source that can give us the latest research in what is best practice CBT [Cognitive Behavioral Therapy] for OCD [Obsessive Compulsive Disorder]... 
[ChatGPT] just never seems quite accurate</p><attrib>#11, F, 25.</attrib></disp-quote><p>Another explained,</p><disp-quote><p>I know that there&#x2019;s been a lot of speculation around finding accurate references&#x2026; I would always, you know, [use ChatGPT] in addition to my own search as well, just to be sure</p><attrib>#2, F, 27.</attrib></disp-quote></sec></sec><sec id="s3-3"><title>RQ2: Likelihood of Recommending a Chatbot to a Client</title><p>The distribution of participants&#x2019; likelihood of recommending a chatbot to a client at T2 is presented in <xref ref-type="fig" rid="figure3">Figure 3</xref>. Data from T2 (postdemonstration) was used for the analysis, as it best reflected participants&#x2019; informed perspectives of generative AI chatbots. One participant responded, &#x201C;I don&#x2019;t know,&#x201D; and their data were excluded from the quantitative analysis.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Distribution of participants&#x2019; likelihood of recommending a generative AI chatbot (n = 22). Error bars represent the SE of the percentage of participants selecting each rating.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="humanfactors_v12i1e71065_fig03.png"/></fig><p>A binomial test was conducted to determine whether participants would be likely to recommend a generative AI chatbot to a client (defined as a response of 3 or 4 on the 4-point Likert scale). The proportion of participants likely to recommend was 55% (12/22), which was not significantly different from the 50% threshold (<italic>P</italic>=.74; odds ratio 1.20 [95% CI 0.47-3.17]). 
A Bayesian binomial test provided moderate evidence for the null hypothesis over the alternative hypothesis that more than 50% of participants would be likely to recommend a chatbot (BF<sub>01</sub>=5.24).</p><sec id="s3-3-1"><title>QT4: Lack of Familiarity With Generative AI Chatbots</title><p>Most participants were not familiar with generative AI chatbots in mental health care. One participant noted,</p><disp-quote><p>I don&#x2019;t feel like I understand the market well enough yet to recommend it to clients. I would need more experience and a chance to evaluate different models before trusting them for client use</p><attrib>#4, M, 42.</attrib></disp-quote><p>Others explained that they simply &#x201C;don&#x2019;t know enough about it&#x201D;. During introductory questions, only one participant reported having experience using an AI chatbot for mental health support for themselves or with a client.</p></sec><sec id="s3-3-2"><title>QT5: Ethical Concerns</title><p>Ethical concerns were frequently cited. One participant explained,</p><disp-quote><p>I don&#x2019;t think that it would be prudent or appropriate to recommend [chatbots] to clients just the same way that we wouldn&#x2019;t recommend unapproved treatments that we can&#x2019;t be fully confident in to clients because then we basically have liability</p><attrib>#9, F, 24.</attrib></disp-quote><p>Another commented,</p><disp-quote><p>Privacy would be my main concern. How do we manage to keep client information confidential if we&#x2019;re using it for note purposes?</p><attrib>#20, F, 29.</attrib></disp-quote></sec><sec id="s3-3-3"><title>QT6: Conditional Use With Clients</title><p>For many participants, their likelihood of recommending a chatbot was dependent on the type of client.
One participant stated,</p><disp-quote><p>I do see a use for it as an emergency point of contact, but for anything beyond that, especially involving complex risk, I wouldn&#x2019;t feel comfortable recommending it</p><attrib>#9, F, 24.</attrib></disp-quote><p>Another explained that their etiological formulation would inform whether a chatbot would be appropriate:</p><disp-quote><p>A lot of people&#x2019;s addictions and mood disorders are born out of childhood trauma, and I think there is also a certain percentage of those who are <italic>enduring</italic> <italic>attachment wounds. And that&#x2019;s where AI is not going to be able to help</italic></p><attrib>#12, F, 66.</attrib></disp-quote><p>Others suggested their likelihood of recommending would be based on age, with one participant explaining,</p><disp-quote><p>It depends on the demographic. Maybe if it was a young person, I would. But in my practice, I don&#x2019;t actually come across many young people. So personally, probably no, because most of my demographics are older</p><attrib>#6, F, 54.</attrib></disp-quote></sec></sec><sec id="s3-4"><title>RQ3: Change in Likelihood of Recommending a Chatbot to a Client After the Demonstration</title><p>A one-tailed paired samples <italic>t</italic> test was conducted to assess whether there was a statistically significant increase in the likelihood of recommending a chatbot from T1 (mean 2.09 [SD 1.07]) to T2 (mean 2.64 [SD 0.79]). Results were significant, <italic>t</italic><sub>21</sub>=3.20; <italic>P</italic>=.002, with a mean difference of 0.55. 
The effect size (<italic>d</italic>=0.68) was medium-to-large [<xref ref-type="bibr" rid="ref35">35</xref>], suggesting a meaningful increase in recommendation likelihood.</p><p>A Bayesian paired samples <italic>t</italic> test provided strong evidence in favor of the alternative hypothesis (<italic>r</italic>=0.707; BF<sub>10</sub>=20.08), indicating that participants were more likely to recommend a chatbot after the demonstration (T2) compared to before (T1).</p><sec id="s3-4-1"><title>QT7: Increased Understanding Postdemonstration</title><p>Qualitative analysis suggested that the demonstration improved participants&#x2019; understanding of how generative AI chatbots can be used in mental health care. One participant stated,</p><disp-quote><p>I have a better sense of the additional value that AI can provide particularly around screening and triage</p><attrib>#4, M, 42.</attrib></disp-quote><p>Another stated that, &#x201C;It gave me new ideas on onboarding and case notes&#x201D; (#1, F, 25). Some participants were satisfied with how the chatbot performed client-facing tasks such as counseling, with one participant stating,</p><disp-quote><p>I was quite happy with how [the chatbot] dealt with everything in terms of like when it realized that the issues were quite severe and that it didn&#x2019;t jump to try to diagnose</p><attrib>#2, F, 27.</attrib></disp-quote></sec><sec id="s3-4-2"><title>QT8: Optimism for Future of Generative AI Chatbots</title><p>After the demonstration, several participants expressed optimism about the potential of generative AI. 
One participant remarked,</p><disp-quote><p>I imagine as the technology gets better and we&#x2019;re able to further program them to work in a really safe and diligent way around this sort of stuff, it probably will work quite smoothly and reliably</p><attrib>#7, F, 27.</attrib></disp-quote><p>One participant likened the introduction of AI chatbots to the evolution of telephone counseling, while another stated:</p><disp-quote><p>I know a lot of people firmly believe that AI will never be able to deliver that human touch. And I just think that I just think they&#x2019;re delusional, frankly, I think, I think they need to&#x2026; realize what&#x2019;s coming</p><attrib>#4, M, 42.</attrib></disp-quote></sec><sec id="s3-4-3"><title>QT9: Heightened Client Safety Concerns Postdemonstration</title><p>Of the 10 participants who reported an increased likelihood of recommending a chatbot, 4 indicated that the demonstration had heightened their concerns regarding client safety. One participant stated,</p><disp-quote><p>I was spooked by how the AI responded when the person expressed suicidality&#x2026; it slightly ups my sense of the risk based on current technology</p><attrib>#5, F, 41.</attrib></disp-quote><p>Another commented,</p><disp-quote><p>The user was feeling a bit flat and all he was left with was a phone number, like rather than &#x2018;I will put you through to Lifeline right now&#x2019;, for example, just leaving risk like that, that&#x2019;s probably not helpful</p><attrib>#8, F, 58.</attrib></disp-quote></sec></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This study found that practitioners appear to prefer administrative uses of generative AI chatbots, such as generating case notes, and are uncertain about recommending chatbots to clients.
While exposure to a demonstration may increase the likelihood of recommending a chatbot, concerns regarding client safety and risk persist.</p><p>The first hypothesis (hypothesis 1) was supported. Generating case notes was selected as the most useful function of generative AI chatbots at greater than chance levels. The second (hypothesis 2) and third (hypothesis 3) hypotheses were not supported. Support with session planning and Identifying and suggesting literature were not selected at greater than chance levels. The fourth hypothesis (hypothesis 4), that less than 50% of participants would be likely to recommend a chatbot to a client, was not supported. The fifth hypothesis (hypothesis 5) was supported; participants were significantly more likely to recommend a chatbot to a client postdemonstration, and Bayesian analysis provided strong evidence for this increase.</p></sec><sec id="s4-2"><title>Practitioners Prefer Administrative Uses of Generative AI Chatbots</title><p>The results indicate a preference among practitioners for using generative AI chatbots for simple, administrative tasks. Quantitative and qualitative data highlight that practitioners see value in generative AI chatbots for drafting and summarizing case notes. Participants expressed enthusiasm that the potential administrative efficiencies of using a chatbot would allow more time to focus on client-facing work. This aligns with previous studies where practitioners reported using generative AI tools for documentation and research [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>].
This preference is also evident in the influx of AI-based tools for case note documentation in the market, such as Heidi (Heidi Health), Upheal (Upheal), and Autonotes (Autonotes AI, LLC) [<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref38">38</xref>].</p><p>There is, however, some nuance in the types of administrative tasks practitioners are willing to use generative AI chatbots for. While descriptive data showed that Identifying and suggesting literature was the second most frequently selected use, it was not selected at greater than chance levels. Similarly, there was a lack of support for Session planning. Participants emphasized the importance of human oversight, indicating a lack of trust in generative AI. This aligns with findings from Blease et al [<xref ref-type="bibr" rid="ref29">29</xref>], where psychiatrists were cautious of using generative AI for tasks that require clinical expertise and raised concerns about accuracy. As such, while practitioners may not be willing to delegate more complex administrative tasks to generative AI chatbots, this study suggests that there is potential for the technology to be applied in a supporting capacity.</p></sec><sec id="s4-3"><title>Practitioners Are Uncertain About Recommending Generative AI Chatbots to Clients</title><p>This study found no clear preference for or against recommending a chatbot to clients, with uncertainty evident in both quantitative and qualitative results. Both the frequentist and Bayesian analyses reflected statistical uncertainty regarding the true proportion of participants who were likely to recommend a chatbot to clients. Qualitative analysis highlighted several ethical concerns, ranging from professional liability to confidentiality and data security, and a lack of familiarity with the technology.
This aligns with previous research, which found that practitioners often harbor ethical concerns and lack education on the appropriate use of generative AI tools in practice [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref32">32</xref>].</p><p>This uncertainty contrasts with Sweeney et al [<xref ref-type="bibr" rid="ref31">31</xref>], where 80% of practitioners were likely to recommend a chatbot to a client. This variance may be explained by the difference in how questions were phrased. In the study by Sweeney et al [<xref ref-type="bibr" rid="ref31">31</xref>], participants were asked whether they would recommend a chatbot &#x201C;in the next five years,&#x201D; allowing practitioners to consider how the technology, and their understanding of it, may evolve. In contrast, the present study focused on immediate likelihood, which may have biased participants toward considering current limitations. Participants also expressed optimism about the future of generative AI, suggesting they may have been more supportive if asked to consider their likelihood of recommending a chatbot in the future.</p></sec><sec id="s4-4"><title>Practitioners&#x2019; Use of Generative AI Chatbots May Be Conditional on the Client</title><p>A unique insight from this study was practitioners&#x2019; support for conditional use of generative AI chatbots, depending on factors unique to the client. Qualitative responses indicated that participants were more inclined to recommend chatbots for low-risk, less complex cases (ie, stress, sleep issues, and life transitions) compared to high-risk (ie, suicidal intent) or trauma-related cases. The existing literature on the appropriate client profiles for generative AI chatbots presents mixed views.
Some propose that AI is appropriate for use with clients with complex clinical histories, on the basis that the technology can analyze and track data on, for example, genetic markers and behavioral patterns [<xref ref-type="bibr" rid="ref39">39</xref>]. Others suggest that AI chatbots may be most appropriate in prevention and psychoeducation in low-risk populations [<xref ref-type="bibr" rid="ref40">40</xref>]. Ultimately, like any mental health treatment, the success of generative AI tools will depend not only on the technology, but also on its fit for the client.</p></sec><sec id="s4-5"><title>Exposure Increases Recommendation Likelihood and May Heighten Client Safety Concerns</title><p>This study found that participants&#x2019; likelihood of recommending a generative AI chatbot to a client increased significantly after a demonstration. Qualitative insights provided depth to the quantitative results, as participants explained that the demonstration had increased their understanding of how chatbots can be applied in mental health care. This result aligns with earlier findings of a positive correlation between health care practitioners&#x2019; knowledge of generative AI chatbots and positive attitudes toward the technology [<xref ref-type="bibr" rid="ref41">41</xref>]. Similarly, previous studies have found that negative perceptions of AI among mental health professionals may be due to minimal exposure [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref42">42</xref>].</p><p>Alternatively, participants&#x2019; increased recommendation likelihood postdemonstration may be reflective of novelty bias, where individuals tend to exhibit increased enthusiasm toward a new or unfamiliar technology or idea [<xref ref-type="bibr" rid="ref43">43</xref>]. Qualitative data suggests that for 22 out of 23 participants (96%), the demonstration was the first time they had seen a generative AI chatbot used in a mental health setting. 
In a similar study by Liu et al [<xref ref-type="bibr" rid="ref16">16</xref>], it was observed that participants responded more positively to a chatbot trained to deliver positive psychology interventions during initial interactions.</p><p>Surprisingly, despite the quantitative increase in recommendation likelihood, qualitative responses indicated that the demonstration heightened client safety concerns for some participants. Of the 10 participants who reported an increased recommendation likelihood, 4 described being more concerned about the risks. This suggests that a greater understanding of the associated risks of using generative AI chatbots may play a role in adoption among practitioners. This aligns with results from Zhang et al [<xref ref-type="bibr" rid="ref42">42</xref>], where participants described the importance of understanding the risks of AI to &#x201C;clinician buy-in&#x201D; and adoption of the technology in practice. While an examination of potential risks was beyond the scope of this study, this finding underscores the importance of providing evidence-based, objective education to practitioners.</p></sec><sec id="s4-6"><title>Limitations and Future Directions for Research</title><p>Several limitations of this study should be addressed. First, convenience sampling may have introduced selection bias, as individuals who were more supportive of AI technologies may have been more likely to participate, leading to an overrepresentation of positive attitudes. Future studies should apply random sampling methods to reduce selection bias, ensure representativeness, and improve external validity [<xref ref-type="bibr" rid="ref44">44</xref>]. In addition, participants may have been aware that the interviewers created the chatbot demonstration, introducing demand characteristics and potential bias toward more favorable responses.
The use of independent researchers to administer questionnaires may assist in mitigating potential bias in future research.</p><p>A primary focus of this research was to gather in-depth qualitative data to allow for greater exploration of practitioners&#x2019; perspectives. Due to project constraints and the time required of participants, the study sample size was small (N=23). While this number is appropriate for qualitative thematic research [<xref ref-type="bibr" rid="ref45">45</xref>], it limited the statistical power and reliability of the quantitative analyses [<xref ref-type="bibr" rid="ref46">46</xref>]. No a priori power calculation was conducted, as the study was primarily exploratory and qualitatively focused. In addition, the sample was Australian, predominantly female (17/23, 74%) and mostly psychologists and counselors (13/23, 56%). Caution should be exercised when generalizing these findings to other health systems or sociocultural contexts. In the future, researchers should recruit larger, more representative samples in different cultural contexts, while still maintaining the qualitative analysis that is crucial for understanding the nuanced perspectives of practitioners.</p><p>Participants were asked to rate their likelihood of recommending a chatbot to a client on a 4-point Likert scale. While the absence of a neutral midpoint encouraged participants to share their perspectives, it may have forced practitioners to choose a position that they did not fully support.</p><p>This study did not consider recent developments in generative AI chatbot technology, such as voice recognition or more sophisticated emotional detection capabilities [<xref ref-type="bibr" rid="ref47">47</xref>]. 
Researchers may consider partnering with AI developers to test emerging technologies to ensure that studies remain relevant, while also allowing for practitioner and user feedback on tools before they are released to market.</p></sec><sec id="s4-7"><title>Practical Implications</title><p>For practitioners, the findings suggest that generative AI chatbots could be valuable in alleviating their administrative burden. However, the study also highlights the need for careful consideration as to how AI is applied and the need for practitioners to improve their understanding of AI chatbots, particularly as their clients may already be using them [<xref ref-type="bibr" rid="ref22">22</xref>].</p><p>Educational institutions and industry associations play an important role in ensuring practitioners have appropriate AI skills and literacy and are prepared to deliver services in a field where technology is playing an increasingly important role [<xref ref-type="bibr" rid="ref48">48</xref>]. Practical, evidence-based guidelines for the use of AI tools should address concerns expressed in this study regarding liability, client safety, and ethics, while also enabling effective use of technology to support positive mental health outcomes. Practitioners should also be informed of emerging research so as to help validate their decision-making and points of view.</p><p>For AI developers, insights from this study may support iteration and refinement of generative AI chatbots for mental health care. For example, developers should ensure practitioners&#x2019; concerns regarding client safety, data security, and accuracy are addressed in AI tools and service agreements. 
This study also highlights the need for developers to engage practitioners, as well as users, throughout the design process to support safe and effective technology implementation.</p></sec><sec id="s4-8"><title>Conclusion</title><p>Generative AI chatbots have significant potential to play an important role in the delivery of mental health care. In the immediate term, their greatest value may be in administrative, nonclient-facing tasks, to alleviate practitioner workload and allow greater attention to be directed to therapeutic work. This study highlighted the complexity and nuance of practitioners&#x2019; perspectives on generative AI chatbots. While there is interest and optimism, there is a lack of familiarity with AI tools and uncertainty regarding client-facing uses. Furthermore, exposure to AI technology may play an important role in supporting adoption among practitioners.</p><p>Future research should continue to explore practitioners&#x2019; views on administrative and client-facing uses with larger, more representative samples to enhance the reliability and generalizability of findings. It would also be beneficial to explore hybrid models and the benefits of therapist-AI collaboration. Such evidence could inform objective guidelines to allow practitioners to make informed decisions on their use of generative AI chatbots. 
Ultimately, this study provides valuable contributions as to &#x201C;how,&#x201D; rather than simply &#x201C;whether,&#x201D; generative AI chatbots may be integrated into mental health care, guiding more thoughtful, stakeholder-informed implementation.</p></sec></sec></body><back><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AAPi</term><def><p>Australian Association of Psychologists Inc</p></def></def-item><def-item><term id="abb2">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb3">APS</term><def><p>Australian Psychological Society</p></def></def-item><def-item><term id="abb4">PTSD</term><def><p>posttraumatic stress disorder</p></def></def-item><def-item><term id="abb5">QT</term><def><p>qualitative theme</p></def></def-item><def-item><term id="abb6">RQ</term><def><p>research question</p></def></def-item><def-item><term id="abb7">T1</term><def><p>recommending a chatbot before demonstration</p></def></def-item><def-item><term id="abb8">T2</term><def><p>recommending a chatbot after demonstration</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kupcova</surname><given-names>I</given-names> </name><name name-style="western"><surname>Danisovic</surname><given-names>L</given-names> </name><name name-style="western"><surname>Klein</surname><given-names>M</given-names> </name><name name-style="western"><surname>Harsanyi</surname><given-names>S</given-names> </name></person-group><article-title>Effects of the COVID-19 pandemic on mental health, anxiety, and depression</article-title><source>BMC Psychol</source><year>2023</year><volume>11</volume><issue>1</issue><pub-id pub-id-type="doi">10.1186/s40359-023-01130-5</pub-id></nlm-citation></ref><ref 
id="ref2"><label>2</label><nlm-citation citation-type="web"><article-title>Mental health services</article-title><source>Australian Institute of Health and Welfare</source><year>2024</year><month>04</month><day>30</day><access-date>2024-10-05</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.aihw.gov.au/mental-health/overview/mental-health-services">https://www.aihw.gov.au/mental-health/overview/mental-health-services</ext-link></comment></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="report"><person-group person-group-type="author"><collab>Mental Health Australia</collab></person-group><article-title>Report to the nation 2023</article-title><access-date>2024-05-15</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://mhaustralia.org/sites/default/files/docs/report_to_the_nation_2023.pdf">https://mhaustralia.org/sites/default/files/docs/report_to_the_nation_2023.pdf</ext-link></comment></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fitzpatrick</surname><given-names>KK</given-names> </name><name name-style="western"><surname>Darcy</surname><given-names>A</given-names> </name><name name-style="western"><surname>Vierhile</surname><given-names>M</given-names> </name></person-group><article-title>Delivering cognitive behavior therapy to young adults with symptoms of depression and anxiety using a fully automated conversational agent (Woebot): a randomized controlled trial</article-title><source>JMIR Ment Health</source><year>2017</year><month>06</month><day>6</day><volume>4</volume><issue>2</issue><fpage>e19</fpage><pub-id pub-id-type="doi">10.2196/mental.7785</pub-id><pub-id pub-id-type="medline">28588005</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Vaidyam</surname><given-names>AN</given-names> </name><name name-style="western"><surname>Wisniewski</surname><given-names>H</given-names> </name><name name-style="western"><surname>Halamka</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Kashavan</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>JB</given-names> </name></person-group><article-title>Chatbots and conversational agents in mental health: a review of the psychiatric landscape</article-title><source>Can J Psychiatry</source><year>2019</year><month>07</month><volume>64</volume><issue>7</issue><fpage>456</fpage><lpage>464</lpage><pub-id pub-id-type="doi">10.1177/0706743719828977</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="book"><person-group person-group-type="editor"><name name-style="western"><surname>Maglogiannis</surname><given-names>I</given-names> </name><name name-style="western"><surname>Iliadis</surname><given-names>L</given-names> </name><name name-style="western"><surname>Pimenidis</surname><given-names>E</given-names></name></person-group><article-title>An overview of chatbot technology</article-title><source>IFIP Advances in Information and Communication Technology (AIAI 2020 Conference Proceedings)</source><year>2020</year><publisher-name>Springer</publisher-name><fpage>373</fpage><lpage>383</lpage><pub-id pub-id-type="doi">10.1007/978-3-030-49186-4_31</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bird</surname><given-names>T</given-names> </name><name name-style="western"><surname>Mansell</surname><given-names>W</given-names> </name><name name-style="western"><surname>Wright</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>Gaffney</surname><given-names>H</given-names> </name><name name-style="western"><surname>Tai</surname><given-names>S</given-names> </name></person-group><article-title>Manage your life online: a web-based randomized controlled trial evaluating the effectiveness of a problem-solving intervention in a student sample</article-title><source>Behav Cogn Psychother</source><year>2018</year><month>09</month><volume>46</volume><issue>5</issue><fpage>570</fpage><lpage>582</lpage><pub-id pub-id-type="doi">10.1017/S1352465817000820</pub-id><pub-id pub-id-type="medline">29366432</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Enos</surname><given-names>G</given-names> </name></person-group><article-title>Digital mental health agent for youths shows results similar to therapy group</article-title><source>Mental Health Weekly</source><year>2023</year><month>11</month><day>6</day><volume>33</volume><issue>43</issue><fpage>1</fpage><lpage>5</lpage><pub-id pub-id-type="doi">10.1002/mhw.33846</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fulmer</surname><given-names>R</given-names> </name><name name-style="western"><surname>Joerin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Gentile</surname><given-names>B</given-names> </name><name name-style="western"><surname>Lakerink</surname><given-names>L</given-names> </name><name name-style="western"><surname>Rauws</surname><given-names>M</given-names> </name></person-group><article-title>Using psychological artificial intelligence (Tess) to relieve symptoms of depression and anxiety: randomized controlled trial</article-title><source>JMIR Ment 
Health</source><year>2018</year><month>12</month><day>13</day><volume>5</volume><issue>4</issue><fpage>e64</fpage><pub-id pub-id-type="doi">10.2196/mental.9782</pub-id><pub-id pub-id-type="medline">30545815</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alanezi</surname><given-names>F</given-names> </name></person-group><article-title>Assessing the effectiveness of ChatGPT in delivering mental health support: a qualitative study</article-title><source>J Multidiscip Healthc</source><year>2024</year><volume>17</volume><fpage>461</fpage><lpage>471</lpage><pub-id pub-id-type="doi">10.2147/JMDH.S447368</pub-id><pub-id pub-id-type="medline">38314011</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Co&#x015F;kun</surname><given-names>AB</given-names> </name><name name-style="western"><surname>Elmao&#x011F;lu</surname><given-names>E</given-names> </name><name name-style="western"><surname>Buran</surname><given-names>C</given-names> </name><name name-style="western"><surname>Y&#x00FC;zer Alsa&#x00E7;</surname><given-names>S</given-names> </name></person-group><article-title>Integration of Chatgpt and E-Health Literacy: Opportunities, Challenges, and a Look Towards the Future</article-title><source>Journal of Health Reports and Technology</source><volume>10</volume><issue>1</issue><pub-id pub-id-type="doi">10.5812/jhrt-139748</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Yilanli</surname><given-names>M</given-names> </name><name name-style="western"><surname>McKay</surname><given-names>I</given-names> </name><name name-style="western"><surname>Jackson</surname><given-names>DI</given-names> 
</name><name name-style="western"><surname>Sezgin</surname><given-names>E</given-names> </name></person-group><article-title>Large language models for individualized psychoeducational tools for psychosis: a cross-sectional study</article-title><source>medRxiv</source><comment>Preprint posted online on  Jul 29, 2024</comment><pub-id pub-id-type="doi">10.1101/2024.07.26.24311075</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Maurya</surname><given-names>RK</given-names> </name><name name-style="western"><surname>Montesinos</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bogomaz</surname><given-names>M</given-names> </name><name name-style="western"><surname>DeDiego</surname><given-names>AC</given-names> </name></person-group><article-title>Assessing the use of ChatGPT as a psychoeducational tool for mental health practice</article-title><source>Couns and Psychother Res</source><year>2025</year><month>03</month><volume>25</volume><issue>1</issue><pub-id pub-id-type="doi">10.1002/capr.12759</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yasukawa</surname><given-names>S</given-names> </name><name name-style="western"><surname>Tanaka</surname><given-names>T</given-names> </name><name name-style="western"><surname>Yamane</surname><given-names>K</given-names> </name><etal/></person-group><article-title>A chatbot to improve adherence to internet-based cognitive&#x2013;behavioural therapy among workers with subthreshold depression: a randomised controlled trial</article-title><source>BMJ Ment Health</source><year>2024</year><month>01</month><volume>27</volume><issue>1</issue><fpage>e300881</fpage><pub-id pub-id-type="doi">10.1136/bmjment-2023-300881</pub-id></nlm-citation></ref><ref 
id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Li</surname><given-names>S</given-names> </name></person-group><article-title>Tech vs. tradition: ChatGPT and mindfulness in enhancing older adults&#x2019; emotional health</article-title><source>Behav Sci (Basel)</source><year>2024</year><volume>14</volume><issue>10</issue><fpage>923</fpage><pub-id pub-id-type="doi">10.3390/bs14100923</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>I</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>F</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ni</surname><given-names>S</given-names> </name></person-group><article-title>Investigating the key success factors of chatbot-based positive psychology intervention with retrieval- and generative pre-trained transformer (GPT)-based chatbots</article-title><source>International Journal of Human&#x2013;Computer Interaction</source><year>2025</year><month>01</month><day>2</day><volume>41</volume><issue>1</issue><fpage>341</fpage><lpage>352</lpage><pub-id pub-id-type="doi">10.1080/10447318.2023.2300015</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davenport</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kalakota</surname><given-names>R</given-names> 
</name></person-group><article-title>The potential for artificial intelligence in healthcare</article-title><source>Future Healthc J</source><year>2019</year><month>06</month><volume>6</volume><issue>2</issue><fpage>94</fpage><lpage>98</lpage><pub-id pub-id-type="doi">10.7861/futurehosp.6-2-94</pub-id><pub-id pub-id-type="medline">31363513</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Townsend</surname><given-names>BA</given-names> </name><name name-style="western"><surname>Plant</surname><given-names>KL</given-names> </name><name name-style="western"><surname>Hodge</surname><given-names>VJ</given-names> </name><name name-style="western"><surname>Ashaolu</surname><given-names>OT</given-names> </name><name name-style="western"><surname>Calinescu</surname><given-names>R</given-names> </name></person-group><article-title>Medical practitioner perspectives on AI in emergency triage</article-title><source>Front Digit Health</source><year>2023</year><volume>5</volume><fpage>1297073</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2023.1297073</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cross</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bell</surname><given-names>I</given-names> </name><name name-style="western"><surname>Nicholas</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Use of AI in mental health care: community and mental health professionals survey</article-title><source>JMIR Ment Health</source><year>2024</year><month>10</month><day>11</day><volume>11</volume><fpage>e60589</fpage><pub-id pub-id-type="doi">10.2196/60589</pub-id><pub-id pub-id-type="medline">39392869</pub-id></nlm-citation></ref><ref 
id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharma</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>IW</given-names> </name><name name-style="western"><surname>Miner</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Atkins</surname><given-names>DC</given-names> </name><name name-style="western"><surname>Althoff</surname><given-names>T</given-names> </name></person-group><article-title>Human&#x2013;AI collaboration enables more empathic conversations in text-based peer-to-peer mental health support</article-title><source>Nat Mach Intell</source><year>2023</year><volume>5</volume><issue>1</issue><fpage>46</fpage><lpage>57</lpage><pub-id pub-id-type="doi">10.1038/s42256-022-00593-2</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Baker</surname><given-names>HP</given-names> </name><name name-style="western"><surname>Dwyer</surname><given-names>E</given-names> </name><name name-style="western"><surname>Kalidoss</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hynes</surname><given-names>K</given-names> </name><name name-style="western"><surname>Wolf</surname><given-names>J</given-names> </name><name name-style="western"><surname>Strelzow</surname><given-names>JA</given-names> </name></person-group><article-title>ChatGPT&#x2019;s ability to assist with clinical documentation: a randomized controlled trial</article-title><source>J Am Acad Orthop Surg</source><year>2024</year><month>02</month><day>1</day><volume>32</volume><issue>3</issue><fpage>123</fpage><lpage>129</lpage><pub-id pub-id-type="doi">10.5435/JAAOS-D-23-00474</pub-id><pub-id pub-id-type="medline">37976385</pub-id></nlm-citation></ref><ref 
id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dergaa</surname><given-names>I</given-names> </name><name name-style="western"><surname>Fekih-Romdhane</surname><given-names>F</given-names> </name><name name-style="western"><surname>Hallit</surname><given-names>S</given-names> </name><etal/></person-group><article-title>ChatGPT is not ready yet for use in providing mental health assessment and interventions</article-title><source>Front Psychiatry</source><year>2023</year><volume>14</volume><fpage>1277756</fpage><pub-id pub-id-type="doi">10.3389/fpsyt.2023.1277756</pub-id><pub-id pub-id-type="medline">38239905</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bartal</surname><given-names>A</given-names> </name><name name-style="western"><surname>Jagodnik</surname><given-names>KM</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>SJ</given-names> </name><name name-style="western"><surname>Dekel</surname><given-names>S</given-names> </name></person-group><article-title>AI and narrative embeddings detect PTSD following childbirth via birth stories</article-title><source>Sci Rep</source><year>2024</year><volume>14</volume><issue>1</issue><fpage>54242</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-54242-2</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guest</surname><given-names>R</given-names> </name><name name-style="western"><surname>Tran</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Gopinath</surname><given-names>B</given-names> </name><name name-style="western"><surname>Cameron</surname><given-names>ID</given-names> </name><name 
name-style="western"><surname>Craig</surname><given-names>A</given-names> </name></person-group><article-title>Prevalence and psychometric screening for the detection of major depressive disorder and post-traumatic stress disorder in adults injured in a motor vehicle crash who are engaged in compensation</article-title><source>BMC Psychol</source><year>2018</year><month>02</month><day>21</day><volume>6</volume><issue>1</issue><fpage>4</fpage><pub-id pub-id-type="doi">10.1186/s40359-018-0216-5</pub-id><pub-id pub-id-type="medline">29467035</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Maurer</surname><given-names>DM</given-names> </name><name name-style="western"><surname>Raymond</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Davis</surname><given-names>BN</given-names> </name></person-group><article-title>Depression: screening and diagnosis</article-title><source>Am Fam Physician</source><year>2018</year><month>10</month><day>15</day><volume>98</volume><issue>8</issue><fpage>508</fpage><lpage>515</lpage><pub-id pub-id-type="medline">30277728</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Galido</surname><given-names>PV</given-names> </name><name name-style="western"><surname>Butala</surname><given-names>S</given-names> </name><name name-style="western"><surname>Chakerian</surname><given-names>M</given-names> </name><name name-style="western"><surname>Agustines</surname><given-names>D</given-names> </name></person-group><article-title>A case study demonstrating applications of ChatGPT in the clinical management of treatment-resistant 
schizophrenia</article-title><source>Cureus</source><year>2023</year><month>04</month><volume>15</volume><issue>4</issue><fpage>e38166</fpage><pub-id pub-id-type="doi">10.7759/cureus.38166</pub-id><pub-id pub-id-type="medline">37252576</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pandya</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lodha</surname><given-names>P</given-names> </name><name name-style="western"><surname>Ganatra</surname><given-names>A</given-names> </name></person-group><article-title>Is ChatGPT ready to change mental healthcare? Challenges and considerations: a reality-check</article-title><source>Front Hum Dyn</source><year>2024</year><volume>5</volume><pub-id pub-id-type="doi">10.3389/fhumd.2023.1289255</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rogan</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bucci</surname><given-names>S</given-names> </name><name name-style="western"><surname>Firth</surname><given-names>J</given-names> </name></person-group><article-title>Health care professionals&#x2019; views on the use of passive sensing, AI, and machine learning in mental health care: systematic review with meta-synthesis</article-title><source>JMIR Ment Health</source><year>2024</year><month>01</month><day>23</day><volume>11</volume><fpage>e49577</fpage><pub-id pub-id-type="doi">10.2196/49577</pub-id><pub-id pub-id-type="medline">38261403</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name 
name-style="western"><surname>Worthen</surname><given-names>A</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name></person-group><article-title>Psychiatrists&#x2019; experiences and opinions of generative artificial intelligence in mental healthcare: An online mixed methods survey</article-title><source>Psychiatry Res</source><year>2024</year><month>03</month><volume>333</volume><fpage>115724</fpage><pub-id pub-id-type="doi">10.1016/j.psychres.2024.115724</pub-id><pub-id pub-id-type="medline">38244285</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Wan</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Ni</surname><given-names>C</given-names> </name><etal/></person-group><article-title>A systematic review of ChatGPT and other conversational large language models in healthcare</article-title><source>Health Informatics</source><comment>Preprint posted online in 2024</comment><pub-id pub-id-type="doi">10.1101/2024.04.26.24306390</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sweeney</surname><given-names>C</given-names> </name><name name-style="western"><surname>Potts</surname><given-names>C</given-names> </name><name name-style="western"><surname>Ennis</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Can chatbots help support a person&#x2019;s mental health? 
Perceptions and views from mental healthcare professionals and experts</article-title><source>ACM Trans Comput Healthcare</source><year>2021</year><month>07</month><day>31</day><volume>2</volume><issue>3</issue><fpage>1</fpage><lpage>15</lpage><pub-id pub-id-type="doi">10.1145/3453175</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kharko</surname><given-names>A</given-names> </name><name name-style="western"><surname>Annoni</surname><given-names>M</given-names> </name><name name-style="western"><surname>Gaab</surname><given-names>J</given-names> </name><name name-style="western"><surname>Locher</surname><given-names>C</given-names> </name></person-group><article-title>Machine learning in clinical psychology and psychotherapy education: a mixed methods pilot survey of postgraduate students at a Swiss University</article-title><source>Front Public Health</source><year>2021</year><volume>9</volume><fpage>623088</fpage><pub-id pub-id-type="doi">10.3389/fpubh.2021.623088</pub-id><pub-id pub-id-type="medline">33898374</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bucci</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bell</surname><given-names>IH</given-names> </name><etal/></person-group><article-title>The growing field of digital psychiatry: current evidence and the future of apps, social media, chatbots, and virtual reality</article-title><source>World Psychiatry</source><year>2021</year><month>10</month><volume>20</volume><issue>3</issue><fpage>318</fpage><lpage>335</lpage><pub-id 
pub-id-type="doi">10.1002/wps.20883</pub-id><pub-id pub-id-type="medline">34505369</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Braun</surname><given-names>V</given-names> </name><name name-style="western"><surname>Clarke</surname><given-names>V</given-names> </name></person-group><article-title>Using thematic analysis in psychology</article-title><source>Qual Res Psychol</source><year>2006</year><month>01</month><volume>3</volume><issue>2</issue><fpage>77</fpage><lpage>101</lpage><pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Cohen</surname><given-names>J</given-names> </name></person-group><source>Statistical Power Analysis for the Behavioral Sciences</source><year>1988</year><edition>2</edition><publisher-name>Lawrence Erlbaum Associates</publisher-name></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="web"><source>Upheal</source><access-date>2024-10-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.upheal.io">https://www.upheal.io</ext-link></comment></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="web"><source>Heidi Health</source><access-date>2024-10-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.heidihealth.com/au">https://www.heidihealth.com/au</ext-link></comment></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="web"><source>Autonotes</source><access-date>2024-10-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://autonotes.ai">https://autonotes.ai</ext-link></comment></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Olawade</surname><given-names>DB</given-names> </name><name name-style="western"><surname>Wada</surname><given-names>OZ</given-names> </name><name name-style="western"><surname>Odetayo</surname><given-names>A</given-names> </name><name name-style="western"><surname>David-Olawade</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Asaolu</surname><given-names>F</given-names> </name><name name-style="western"><surname>Eberhardt</surname><given-names>J</given-names> </name></person-group><article-title>Enhancing mental health with artificial intelligence: current trends and future prospects</article-title><source>Journal of Medicine, Surgery, and Public Health</source><year>2024</year><month>08</month><volume>3</volume><fpage>100099</fpage><pub-id pub-id-type="doi">10.1016/j.glmedi.2024.100099</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ettman</surname><given-names>CK</given-names> </name><name name-style="western"><surname>Galea</surname><given-names>S</given-names> </name></person-group><article-title>The potential influence of AI on population mental health</article-title><source>JMIR Ment Health</source><year>2023</year><month>11</month><day>16</day><volume>10</volume><fpage>e49936</fpage><pub-id pub-id-type="doi">10.2196/49936</pub-id><pub-id pub-id-type="medline">37971803</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name></person-group><article-title>Knowledge, attitude, and practices regarding ChatGPT among health care 
professionals</article-title><source>Am J Manag Care</source><year>2024</year><month>09</month><day>1</day><volume>30</volume><issue>9</issue><fpage>e258</fpage><lpage>e265</lpage><pub-id pub-id-type="doi">10.37765/ajmc.2024.89604</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>M</given-names> </name><name name-style="western"><surname>Scandiffio</surname><given-names>J</given-names> </name><name name-style="western"><surname>Younus</surname><given-names>S</given-names> </name><etal/></person-group><article-title>The adoption of AI in mental health care-perspectives from mental health professionals: qualitative descriptive study</article-title><source>JMIR Form Res</source><year>2023</year><month>12</month><day>7</day><volume>7</volume><fpage>e47847</fpage><pub-id pub-id-type="doi">10.2196/47847</pub-id><pub-id pub-id-type="medline">38060307</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Mirnig</surname><given-names>A</given-names> </name><name name-style="western"><surname>Gaertner</surname><given-names>M</given-names> </name><name name-style="western"><surname>Meschtscherjakov</surname><given-names>A</given-names> </name><name name-style="western"><surname>Tscheligi</surname><given-names>M</given-names> </name></person-group><article-title>Blinded by novelty: a reflection on participant curiosity and novelty in automated vehicle studies based on experiences from the field</article-title><year>2020</year><conf-name>Association for Computing Machinery</conf-name><conf-date>Sep 6-9, 2020</conf-date><conf-loc>Magdeburg, Germany</conf-loc><pub-id pub-id-type="doi">10.1145/3404983.3405593</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Andrade</surname><given-names>C</given-names> </name></person-group><article-title>The inconvenient truth about convenience and purposive samples</article-title><source>Indian J Psychol Med</source><year>2021</year><month>01</month><volume>43</volume><issue>1</issue><fpage>86</fpage><lpage>88</lpage><pub-id pub-id-type="doi">10.1177/0253717620977000</pub-id><pub-id pub-id-type="medline">34349313</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guest</surname><given-names>G</given-names> </name><name name-style="western"><surname>Bunce</surname><given-names>A</given-names> </name><name name-style="western"><surname>Johnson</surname><given-names>L</given-names> </name></person-group><article-title>How many interviews are enough? An experiment with data saturation and variability</article-title><source>Field Methods</source><year>2006</year><volume>18</volume><issue>1</issue><fpage>59</fpage><lpage>82</lpage><pub-id pub-id-type="doi">10.1177/1525822X05279903</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Button</surname><given-names>KS</given-names> </name><name name-style="western"><surname>Ioannidis</surname><given-names>JPA</given-names> </name><name name-style="western"><surname>Mokrysz</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Power failure: why small sample size undermines the reliability of neuroscience</article-title><source>Nat Rev Neurosci</source><year>2013</year><month>05</month><volume>14</volume><issue>5</issue><fpage>365</fpage><lpage>376</lpage><pub-id pub-id-type="doi">10.1038/nrn3475</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation 
citation-type="web"><person-group person-group-type="author"><collab>OpenAI</collab></person-group><source>ChatGPT can now see, hear, and speak. OpenAI website</source><year>2023</year><month>09</month><day>25</day><access-date>2024-10-18</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://openai.com/index/chatgpt-can-now-see-hear-and-speak">https://openai.com/index/chatgpt-can-now-see-hear-and-speak</ext-link></comment></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rajaei</surname><given-names>A</given-names> </name></person-group><article-title>Teaching in the age of AI/ChatGPT in mental-health-related fields</article-title><source>The Family Journal</source><year>2024</year><month>01</month><volume>32</volume><issue>1</issue><fpage>6</fpage><lpage>10</lpage><pub-id pub-id-type="doi">10.1177/10664807231209721</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Interview questions and interviewer script.</p><media xlink:href="humanfactors_v12i1e71065_app1.docx" xlink:title="DOCX File, 19 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Demonstration transcript and link.</p><media xlink:href="humanfactors_v12i1e71065_app2.docx" xlink:title="DOCX File, 27 KB"/></supplementary-material></app-group></back></article>