<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Hum Factors</journal-id><journal-id journal-id-type="publisher-id">humanfactors</journal-id><journal-id journal-id-type="index">6</journal-id><journal-title>JMIR Human Factors</journal-title><abbrev-journal-title>JMIR Hum Factors</abbrev-journal-title><issn pub-type="epub">2292-9495</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e69144</article-id><article-id pub-id-type="doi">10.2196/69144</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Building and Beta-Testing Be Well Buddy Chatbot, a Secure, Credible and Trustworthy AI Chatbot That Will Not Misinform, Hallucinate or Stigmatize Substance Use Disorder: Development and Usability Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Salyers</surname><given-names>Adam Jerome</given-names></name><degrees>BA</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Bull</surname><given-names>Sheana</given-names></name><degrees>MPH, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Silvasstar</surname><given-names>Joshva</given-names></name><degrees>MSIS, BS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Howell</surname><given-names>Kevin</given-names></name><degrees>MSIS</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wright</surname><given-names>Tara</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Banaei-Kashani</surname><given-names>Farnoush</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib></contrib-group><aff id="aff1"><institution>Clinic Chat, LLC</institution><addr-line>2950 Arkins Ct, Unit 605</addr-line><addr-line>Denver</addr-line><addr-line>CO</addr-line><country>United States</country></aff><aff id="aff2"><institution>University of Texas Health Sciences at San Antonio</institution><addr-line>San Antonio</addr-line><addr-line>TX</addr-line><country>United States</country></aff><aff id="aff3"><institution>Department of Computer Science and Engineering, University of Colorado</institution><addr-line>Denver</addr-line><addr-line>CO</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Choudhury</surname><given-names>Avishek</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Krishnapatnam</surname><given-names>Mahendra</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Kumar</surname><given-names>Santhosh</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Sheana Bull, MPH, PhD, Clinic Chat, LLC, 2950 Arkins Ct, Unit 605, Denver, CO, 80216, United States, 1 3038079800; <email>sheana.bull@clinicchat.com</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date 
pub-type="epub"><day>7</day><month>5</month><year>2025</year></pub-date><volume>12</volume><elocation-id>e69144</elocation-id><history><date date-type="received"><day>22</day><month>11</month><year>2024</year></date><date date-type="rev-recd"><day>04</day><month>03</month><year>2025</year></date><date date-type="accepted"><day>06</day><month>03</month><year>2025</year></date></history><copyright-statement>&#x00A9; Adam Jerome Salyers, Sheana Bull, Joshva Silvasstar, Kevin Howell, Tara Wright, Farnoush Banaei-Kashani. Originally published in JMIR Human Factors (<ext-link ext-link-type="uri" xlink:href="https://humanfactors.jmir.org">https://humanfactors.jmir.org</ext-link>), 7.5.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Human Factors, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://humanfactors.jmir.org">https://humanfactors.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://humanfactors.jmir.org/2025/1/e69144"/><abstract><sec><title>Background</title><p>Artificially intelligent (AI) chatbots that deploy natural language processing and machine learning are becoming more common in health care to facilitate patient education and outreach; however, generative chatbots such as ChatGPT face challenges, as they can misinform and hallucinate. 
Health care systems are increasingly interested in using these tools for patient education, access to care, and self-management, but need reassurances that AI systems can be secure and credible.</p></sec><sec><title>Objective</title><p>This study aimed to build a secure system that people can use to send SMS with questions about substance use, and which can be used to screen for substance use disorder (SUD). The system will rely on data transfer via third-party vendors and will thus require reliable and trustworthy encryption of protected health information.</p></sec><sec sec-type="methods"><title>Methods</title><p>We describe the process and specifications for building an AI chatbot that users can access to gain information on and screen for SUD from Be Well Texas, a clinical provider affiliated with the University of Texas Health Sciences Center at San Antonio.</p></sec><sec sec-type="results"><title>Results</title><p>The AI chatbot system uses natural language processing and machine learning to classify expert-curated content related to SUD. 
It illustrates how we can comply with best practices in HIPAA (Health Insurance Portability and Accountability Act) compliance in data encryption for data transfer and data at rest, while still offering a state-of-the-art system that uses dynamic, user-driven conversation to dialogue about SUD, screen for SUD and access SUD treatment services.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Recent calls for attention to user-friendly design concerning user rights that honor digital rights and regulations for digital substance use offerings suggest that this study is timely and appropriate while still advancing the field of AI.</p></sec><sec><title>Trial Registration</title><p>Not Applicable</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>chatbot</kwd><kwd>infrastructure</kwd><kwd>substance use disorder</kwd><kwd>digital health</kwd><kwd>health communication</kwd><kwd>conversational agent</kwd><kwd>HIPAA</kwd><kwd>AI</kwd><kwd>Health Insurance Portability and Accountability Act</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Given the ongoing opioid epidemic in the United States [<xref ref-type="bibr" rid="ref1">1</xref>], consideration of how and whether we can use digital tools to facilitate a response is warranted. One way that digital solutions may be deployed is by using them to scale screening for substance use disorder (SUD). Screening for SUD is critical to identify people at risk [<xref ref-type="bibr" rid="ref2">2</xref>], as it can lead to referrals for services including outpatient as well as medically managed intensive inpatient services [<xref ref-type="bibr" rid="ref3">3</xref>]. 
Although Screening, Brief Intervention, and Referral to Treatment (SBIRT) programs have been widely used to facilitate screening [<xref ref-type="bibr" rid="ref4">4</xref>], and these programs are available electronically [<xref ref-type="bibr" rid="ref5">5</xref>], there is still a major gap in screening, given current estimates indicate fewer than 10% of those at risk for SUD are ever screened. This exacerbates an already costly response to the opioid epidemic, given that the United States spends more than $271 billion annually to address SUD-related health issues, crime, and lost productivity [<xref ref-type="bibr" rid="ref6">6</xref>]. If we could enroll more people at risk in treatment, we would save $4 for every treatment dollar expended [<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>Artificially intelligent (AI) chatbots that deploy natural language processing (NLP) and machine learning (ML) are becoming more common in health care to facilitate patient education and outreach [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. These systems advance earlier generation rule-based chatbots that rely on &#x201C;fixed state&#x201D; messages that force users to choose from a predetermined set of responses to an interactive, user-driven system where people can initiate conversations on any number of topics and chatbots can generate answers on-the-fly. The current AI chatbots, particularly generative chatbots such as ChatGPT [<xref ref-type="bibr" rid="ref10">10</xref>], rely on access to a large language model (LLM) for training bots to correctly classify and respond to queries. 
These LLMs represent a much more sophisticated way to interact with people that allows for rapid exploration of vast data sources to learn how to correctly understand the intention of the queries that people make, to make inferences from data, and generate relevant answers in response to user queries.</p><p>There is an ample body of evidence from investigations of earlier, fixed-state message systems showing they can be used effectively to positively impact health behaviors and health outcomes [<xref ref-type="bibr" rid="ref11">11</xref>]. Although we do not yet know the impact of moving from fixed-state chatbots to conversational and generative AI systems, health care systems that have adopted these AI systems are optimistic that they will offer ample return on investment [<xref ref-type="bibr" rid="ref12">12</xref>]. Since the more advanced AI systems are nascent, we have not yet established standards for best practices in their design. System users and developers of AI chatbots, including generative chatbots such as ChatGPT, have raised concerns related to their use that warrant attention for health-related applications [<xref ref-type="bibr" rid="ref13">13</xref>]. Ethical concerns include observations that systems could discriminate against, stereotype, or stigmatize users, and could also compromise privacy and data sovereignty [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. Researchers have documented tendencies for generative chatbots to misinform [<xref ref-type="bibr" rid="ref16">16</xref>], hallucinate [<xref ref-type="bibr" rid="ref17">17</xref>], and obscure information about how data are being accessed and used [<xref ref-type="bibr" rid="ref18">18</xref>].</p><p>Recent reviews and published literature have highlighted the critical challenges that are inherent in maintaining security for AI chatbots. 
They highlight chronic concerns with data breaches and malicious input and called for standards such as end-to-end encryption, organizational control, and adversarial training (ie, purposefully attempting to confuse a system during development to train it to recognize potentially malicious inputs) to mitigate these [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. One researcher has highlighted additional critical concerns that are specific to health care organizations that seek to use AI chatbots, including a need to align chatbot design and security with Health Insurance Portability and Accountability Act (HIPAA) regulations that govern patient protections in care delivery [<xref ref-type="bibr" rid="ref21">21</xref>].</p><p>Optimizing digital tools for substance use is warranted. However, we currently do not have digital tools that focus explicitly on user rights, including privacy; are evidence-based; user friendly; easily accessible, person-centered [<xref ref-type="bibr" rid="ref22">22</xref>]; and can be delivered without generating or reinforcing stigma.</p><p>In this paper, we present the infrastructure and technical specifications used to design Be Well Buddy, an AI chatbot focused on raising awareness and access to screening and treatment for SUD. It intentionally addresses the security and ethical concerns identified here. We also present findings from a beta test of the system whose goals were to verify system security and functionality.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><p>This work represents a partnership between Clinic Chat, LLC, a health technology start-up company, and The University of Texas Health Sciences Center at San Antonio (UT Health San Antonio), a US-based university with a robust clinical program called Be Well Texas, focused on screening, treatment, and social support for SUD. 
The principals from Clinic Chat have a background in scientific research related to health as well as the technical skill to design and deploy AI chatbots; UT Health San Antonio and Be Well Texas have a wide-reaching health education initiative focused on substance use prevention and treatment. The partnership was established to adapt health AI chatbots to focus specifically on raising awareness and providing access to screening for and treatment of SUD within the university program.</p><sec id="s2-1"><title>System Specifications</title><p>Given concerns that generative chatbots can hallucinate and misinform, along with ample evidence that careful attention to the design of messages for health communication can be more impactful than generic messaging, we determined it appropriate to develop and curate messages specific to SUD that could be delivered by a closed-domain AI chatbot system (ie, one where messages returned in response to user queries would only come from this specific library of messages). This approach allows us to avoid a common pitfall of generative AI such as ChatGPT that can reinforce biases or misinform when seeking responses to user queries. By using a closed library of responses that have been reviewed for accuracy, empathy and tone, our system can only choose from a limited group of options that are medically correct and consistent with clinical guidelines for care. 
Guided by literature documenting approaches to health communication that can increase engagement with messages [<xref ref-type="bibr" rid="ref23">23</xref>], we generated content that allowed for tailoring to individual users to increase content relevance (eg, by prefacing each message with their first name or by inviting them to name and explore preferred topics) [<xref ref-type="bibr" rid="ref24">24</xref>]; that could offer intuitive suggestions for behavioral decision-making [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]; that offered narrative, emotional messaging, status-enhancing and skill-building content [<xref ref-type="bibr" rid="ref27">27</xref>]; and that worked to destigmatize substance use [<xref ref-type="bibr" rid="ref28">28</xref>]. These message design strategies have consistently been shown to have a positive impact on health behavior [<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref31">31</xref>]. We developed an initial library of messages and then reviewed the message content in a formative process with members of the intended audience of the chatbot to obtain their feedback on the message content, tone, acceptability, and capacity to engage people in dialogue. One key outcome from this process was the name &#x201C;Be Well Buddy,&#x201D; which we gave to the system, generated from a participant in this formative message development process (Sarah Mumby, MPH, email August 15, 2024).</p></sec><sec id="s2-2"><title>Beta Testing</title><p>Once the system was designed and built, we conducted a one-day beta test to ensure system functionality with members of the study team from Clinic Chat and UT Health San Antonio. All participants were encouraged to try and push the system to its limits. Specifically, they asked expected questions (eg, What is a substance use disorder? What is medication-assisted therapy? How much does treatment cost?) 
and asked multiple questions in a short period of time (ie, five-six questions in a minute). They made queries in different languages and completed all the screeners embedded in the system. They deliberately worked to confuse the system with questions that were nonsensical or unrelated to SUD. Our goals for the beta test were to (1) identify errors in sending and receiving messages and use this to determine the system&#x2019;s precision (ie, whether responses sent by the system appropriately matched the intent behind the user queries); (2) identify any problems with encryption; and (3) identify any delays in message delivery.</p></sec><sec id="s2-3"><title>Ethical Considerations</title><p>The work we describe here was reviewed by UT Health San Antonio Institutional Review Board, who deemed these activities as preliminary to research conducted exclusively by paid research staff and therefore exempted from requirements for human subjects&#x2019; approval. Identifying data for persons beta-testing the system included telephone numbers, which were encrypted once outside the UT Health San Antonio firewall. The data presented in this paper informed an observational study that has been reviewed and approved by the UT Health San Antonio Institutional Review Board (protocol number 20230662H).</p><p>A description of system specifications and functionality, and results of the beta test is given below.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>System Specifications</title><p><xref ref-type="fig" rid="figure1">Figure 1</xref> illustrates how the system retrieves and shares information, along with a description of the security steps that are in place to ensure user data are protected. In this figure, purple boxes represent end users of the system, ie, people who send and receive text messages via SMS. 
The blue boxes represent any third-party vendor that retrieves or sends information from the user to the Clinic Chat AI Chatbot, called &#x201C;Be Well Buddy.&#x201D; The orange box represents the firewall for UT Health San Antonio, home to Be Well Texas, the clinical entity whose patients and prospective patients are the intended users of Be Well Buddy.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>System workflow. AI: artificial intelligence; PHI: protected health information.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="humanfactors_v12i1e69144_fig01.png"/></fig><p>Data exchanges between users and the AI chatbot are described as steps in <xref ref-type="fig" rid="figure1">Figure 1</xref>. In Step 1, Be Well Buddy initiates a dialogue with users when it sends an initial SMS message to them after receiving information on their first name and telephone number from UT Health San Antonio, which they provide upon enrolling a user in the feasibility study. All SMS messages&#x2014;incoming and outgoing&#x2014;are handled by our third-party provider, Vonage Telecommunications Company (Step 2).</p><p>System users can respond to any message from Be Well Buddy. When this is done, the data are forwarded to the UT Health San Antonio Server (Step 3), where we host a Flask application designed for minimal impact on system performance. This Flask app uses a webhook to ingest the information efficiently. The lightweight nature of Flask, as opposed to heavier frameworks like Django, is crucial in maintaining fast processing speeds behind the secure firewall. The information received by the Flask app includes one piece of protected health information (PHI)&#x2014;the phone number&#x2014;which is now encrypted using SHA-256 bit encryption. 
This robust encryption method ensures that the data cannot be read or altered without the encryption key and remains encrypted anywhere outside the firewall, including Clinic Chat Amazon Web Server (AWS).</p><p>The message content and metadata without identifiers (eg, message time, type), along with the encrypted phone number are sent to a Clinic Chat-owned and managed AWS, known as an EC2 instance that hosts a Django application to authenticate and retrieve information from the Be Well Buddy chatbot; only whitelisted IPs can send requests to the AWS EC2 instance.</p><p>At this point, a request is made to our IBM Watson AI (version 10.2.0) assistant that includes only the message body from the SMS message (Step 4). IBM Watson currently serves as our NLP AI system to classify message content. Prior to forwarding messages to Watson, our Django app (version 5.2) performs a variety of preprocessing tasks using regex and custom code to effectively manage number inputs, survey and screener responses, and any content that does not constitute a direct user query. While IBM Watson effectively classifies message content and returns a response (Step 5), we plan to transition to an internally hosted LLaMA integration in the future to leverage the impressive capabilities of LLMs. This response, along with the encrypted phone number, is sent back to a Flask app&#x2014;a lightweight application optimized for faster processing&#x2014;hosted behind the university server before being forwarded to the user through Vonage.</p><p>The response, message, metadata, and encrypted phone number are sent to an Amazon Web Server (AWS) Relational Database Service (RDS) instance for storage (Step 6). 
A dashboard hosted on the Clinic Chat subdomain (through another AWS EC2 instance) can then send requests for the system to retrieve data so team members can view large or small trends in anonymized message data (Step 7).</p><p>The system relies on data transfers with multiple third-party vendors, including Vonage that aggregates message delivery via multiple cell phone providers; IBM Watson, which classifies message content; and AWS that stores data. Following is a description of how each of these providers operates with the Be Well Buddy chatbot.</p><p>Vonage is our third-party telecommunications provider. They manage all outgoing and incoming messages. As they handle phone numbers, which are considered PHI that is regulated under HIPAA, they have executed a Business Associate Agreement with Clinic Chat. This Business Associate Agreement ensures that both Vonage and Clinic Chat remain compliant with HIPAA regulations related to PHI by redacting all phone numbers and messages from Vonage logs. To retrieve the phone numbers and message information, the UT Health San Antonio server has generated a whitelist of IP addresses from Vonage (ie, the IP addresses for computers used by Clinic Chat staff) that will limit JSON data intakes exclusively from these IP addresses. All data will be sent to Vonage and received from Vonage through HTTPS, a secure data transfer protocol that encrypts data during transfer.</p><p>IBM Watson is used to store data to train our AI models, allowing the system to more precisely interpret incoming queries from users so the system can then respond with a message from our curated library that corresponds to the intent behind each user query. 
IBM Watson will never receive any PHI in any form from messages&#x2014;they only receive user queries as the text message body, which is used to match to one of our answers in our library of responses before returning it to the Clinic Chat AWS instance [<xref ref-type="bibr" rid="ref32">32</xref>] (Step 8).</p><p>We are using two AWS EC2 server instances&#x2014;one to host the backend server responsible for connecting the UT Health San Antonio server, IBM Watson and the database, and a second to host the front-end team interaction and database. Both are configured according to AWS HIPAA guidelines, although unencrypted PHI will never reach AWS. The backend server will only connect to the UT Health San Antonio server via HTTPS connection, IBM Watson, and AWS RDS. We whitelist the UT Health San Antonio server IP (and a few developer IPs for testing purposes), but no other IPs will be able to hit the server. The front end is also whitelisted to limited team member IPs and contains a login and user authentication system to view anonymized data.</p><p>We also use an AWS RDS instance to store our data for the project. All PHI are encrypted when entering AWS and are stored in encrypted form in RDS; they are never decrypted in AWS. The RDS database is connected to EC2 instances. The backend will only write data to the database, and the front end will only pull data.</p><p>The UT Health San Antonio server hosts a Flask app to process and encrypt PHI before passing it on to AWS, and process and decrypt phone numbers when sending it to Vonage. The app has a webhook that interprets a series of whitelisted IPs from Vonage, which will send JSON data including phone number and message content. Phone numbers are the only PHI the system receives, so the Flask app encrypts the phone number using Python Fernet encryption before sending the message content and encrypted phone number to our AWS EC2 backend instance. 
Additionally, the Flask app has a webhook that receives incoming JSON data from the same AWS EC2 instance. The AWS EC2 instance is whitelisted, and along with Vonage, these are the only servers that are whitelisted (outside of a dedicated IP for a developer to fix any issues). The Flask app will take in an encrypted phone number and response message from the EC2 instance, decrypt the phone number, and pass the phone number and response message back to Vonage to be sent to the user (Steps 9 and 10).</p><p>In anticipation of possible system attacks, we also built in protocols to reduce their impact should they occur. We exposed our model to anticipated queries that are adversarial (eg, &#x201C;I don&#x2019;t want to screen for SUD, leave me alone,&#x201D; or &#x201C;Your answers are stupid&#x201D;) to train for resilience and respond professionally (eg, &#x201C;I&#x2019;m sorry you are disappointed with my response. I would be happy to try again or discuss a different topic&#x201D;). When a user asks the system a question, the response is chosen from the library using NLP and then relies on probabilistic models to determine how likely it is that a response from our library will match the intent behind the user query. We monitor all logs to determine if system users and system content are behaving as expected and to identify any suspicious interactions.</p></sec><sec id="s3-2"><title>Beta Testing</title><p>The beta test was conducted over the course of one day with six users. Users sent 426 messages to the system (for an average of 71 messages per user, with a range of 25&#x2010;110 messages), and the system responded 800 times. The additional system responses included intentional follow-up prompts (eg, &#x201C;Ask me about something else! I can answer your questions about medication-assisted therapy&#x201D; or &#x201C;Are you interested in screening for SUD?&#x201D;) that were appended to responses. 
We documented two instances where users received responses in Spanish, although the content in the library was only available in English. Upon investigation, we determined that the system was retrieving messages from a different library within the Clinic Chat system focused on chronic illness self-management. We unlinked the libraries between the SUD and chronic conditions content to avoid this error in the future. These two errors were the only ones we documented when the system returned an incorrect response (ie, 2 of 426 or &#x003C;1%). When a user sent a nonsensical query or one that did not align with SUD, the system correctly responded with &#x201C;I&#x2019;m sorry, I am still learning and did not understand your question. Will you please ask again in different words?&#x201D; Thus, we established a &#x003E;99% level of system precision overall. We documented three instances where message return was quite slow (ie, &#x003E;90 seconds to return a response). In all other instances, responses were sent within the first 10 seconds of message receipt. We recognized this as a problem with Vonage and reached out to report the message delays to them. We identified a problem with the responses returned to users who screened for anxiety, depression, and SUD, where those with high risk were not alerted to their risk and given a referral, necessitating a review and correction of the referral algorithm. The algorithm for screening and responses related to screening were incorrect and have been fixed. We did not identify any problems with system encryption or data security in transferring queries via SMS. 
Given the short time frame and small sample of beta testers, we did not experience any adversarial attacks to our system during the beta test and cannot report on the robustness of our protocol to address these attacks in this paper.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><p>In this study, we present the technical specifications included in Be Well Buddy to facilitate access to information and screening for SUD for Be Well Texas, a clinical organization affiliated with UT Health San Antonio. Our system adheres to HIPAA regulations for the protection of PHI while avoiding pitfalls of current generative AI chatbots by using curated chat content that does not misinform or hallucinate. Users of Be Well Buddy can get credible and complete information about SUD and can access care on a 24/7 basis. The beta test uncovered and resolved two functional errors; ie, (1) delivery of messages unrelated to SUD and (2) an incorrect algorithm for screening feedback. After correcting these errors, we verified that the system functions as intended and has a high level of precision [<xref ref-type="bibr" rid="ref33">33</xref>].</p><p>This study is not without limitations. Our objective was to ensure that the system worked without error and was secure; therefore, using a small number of beta testers was appropriate to achieve these findings [<xref ref-type="bibr" rid="ref34">34</xref>]. However, the small number of beta testers did not allow us to delve deeply into how well the content resonated for users. Further, as noted above, given the short time frame and small sample of beta testers, we did not experience any adversarial attacks to our system during the beta test and thus cannot report on the robustness of our protocol in such scenarios. 
These topics would be appropriate for a subsequent trial of system use.</p><p>While we stand behind our approach of using a closed library system to avoid challenges with misinformation and hallucination, this model requires regular updates to content to ensure it remains consistent with medical and professional guidelines related to SUD, SUD screening and treatment referrals. This task may evolve to become cumbersome if library updates are frequent or extensive.</p><p>AI chatbots can struggle with delineating between nuanced concepts, making it difficult to support complex questions from users. As we rely on IBM Watson for classification and other language models, this challenge persists and will require careful attention to identify inaccuracies to allow for appropriate reclassification of content when systems do not respond with precision.</p><p>Finally, we recognize that the datasets used to train AI chatbots have inherent biases, which we risk reproducing when scaling this system. This risk can be particularly challenging for the Be Well Buddy chatbot whose explicit goal is to reduce stigma. Extra effort is recognized to ensure that the content does not produce or reinforce stereotypes about substance use or people who use substances [<xref ref-type="bibr" rid="ref35">35</xref>].</p><p>According to a recent McKinsey report [<xref ref-type="bibr" rid="ref12">12</xref>], a large group of surveyed health care leaders indicated that their organizations are eager to use generative AI to enhance operations&#x2014;particularly patient engagement processes&#x2014;but most are still adopting a wait-and-see approach. While traditional rule-based chatbots have already proven effective, the AI-driven chatbots, particularly those that are driven by generative models such as ChatGPT, have yet to be proven safe for health care applications. 
In particular, a number of ethical concerns, such as privacy, confidentiality, bias and fairness, transparency, accountability, regulatory compliance, risk anticipation and copyright complications remain the subject of active research and mitigation in generative models. Additionally, the risks of misinformation, perpetuation of bias, or hallucination continue to dampen enthusiasm for widespread deployment of AI Chatbots in health care [<xref ref-type="bibr" rid="ref8">8</xref>].</p><p>Until effective and proven solutions are developed to address these concerns, widespread use of AI-driven chatbots in health care applications is not anticipated.</p><p>In this paper, we introduced the Be Well Buddy chatbot, a novel AI-driven chatbot that adopts a secure and reliable approach to engage with patients with SUD. Our system circumvents challenges with misinformation, bias, and hallucination, while securely delivering and supporting access to SUD screening and treatment options.</p><p>Our research is consistent with recent recommendations for optimized digital substance use interventions, such as solutions that emphasize digital rights of privacy and confidentiality, accessibility, and user-friendliness [<xref ref-type="bibr" rid="ref15">15</xref>]. In the future, we will further experiment with Be Well Buddy to evaluate and report its efficacy in promoting self-screening and referral for SUD. We anticipate integration with treatment providers who wish to accelerate referrals for care by inviting persons living in their catchment areas to use the system and for self-screening. 
This system may also be integrated into other organizations adjacent to SUD treatment, such as the criminal justice system where care may be mandated, or the parole system where people may seek to closely monitor their own or clients&#x2019; risk for SUD.</p></sec></body><back><ack><p>The authors gratefully acknowledge the participants who were willing to engage with the system and offer their feedback. This work was supported with funding from the National Institutes of Health, National Institute on Drug Abuse (NIDA) under grant number 1R41DA059275-01.</p></ack><notes><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="conflict"><p>SB, AJS, and JS are affiliated with the organization Clinic Chat LLC that developed and deployed the Be Well Buddy system described in this study.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AWS</term><def><p>Amazon Web Services</p></def></def-item><def-item><term id="abb3">HIPAA</term><def><p>Health Insurance Portability and Accountability Act</p></def></def-item><def-item><term id="abb4">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb5">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb6">NLP</term><def><p>natural language processing</p></def></def-item><def-item><term id="abb7">PHI</term><def><p>protected health information</p></def></def-item><def-item><term id="abb8">SUD</term><def><p>substance use disorder</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Salazar</surname><given-names>CI</given-names> </name><name 
name-style="western"><surname>Huang</surname><given-names>Y</given-names> </name></person-group><article-title>The burden of opioid-related mortality in Texas, 1999 to 2019</article-title><source>Ann Epidemiol</source><year>2022</year><month>01</month><volume>65</volume><fpage>72</fpage><lpage>77</lpage><pub-id pub-id-type="doi">10.1016/j.annepidem.2021.09.004</pub-id><pub-id pub-id-type="medline">34560252</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="web"><source>Overdose death rates</source><year>2024</year><access-date>2022-12-14</access-date><publisher-name>National Institute of Drug Abuse</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://nida.nih.gov/research-topics/trends-statistics/overdose-death-rates">https://nida.nih.gov/research-topics/trends-statistics/overdose-death-rates</ext-link></comment></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="web"><article-title>Medicaid innovation accelerator program reducing substance use disorders. high intensity learning collaborative fact sheet</article-title><source>Medicaid Innovation Accelerator Program</source><year>2014</year><access-date>2022-07-11</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.medicaid.gov/state-resource-center/innovation-accelerator-program/iap-downloads/learn-hilciap.pdf">https://www.medicaid.gov/state-resource-center/innovation-accelerator-program/iap-downloads/learn-hilciap.pdf</ext-link></comment></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Saitz</surname><given-names>R</given-names> </name></person-group><article-title>&#x201C;SBIRT&#x201D; is the answer? 
Probably not</article-title><source>Addiction</source><year>2015</year><month>09</month><volume>110</volume><issue>9</issue><fpage>1416</fpage><lpage>1417</lpage><pub-id pub-id-type="doi">10.1111/add.12986</pub-id><pub-id pub-id-type="medline">26223169</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Olmstead</surname><given-names>TA</given-names> </name><name name-style="western"><surname>Yonkers</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Ondersma</surname><given-names>SJ</given-names> </name><name name-style="western"><surname>Forray</surname><given-names>A</given-names> </name><name name-style="western"><surname>Gilstad-Hayden</surname><given-names>K</given-names> </name><name name-style="western"><surname>Martino</surname><given-names>S</given-names> </name></person-group><article-title>Cost-effectiveness of electronic- and clinician-delivered screening, brief intervention and referral to treatment for women in reproductive health centers</article-title><source>Addiction</source><year>2019</year><month>09</month><volume>114</volume><issue>9</issue><fpage>1659</fpage><lpage>1669</lpage><pub-id pub-id-type="doi">10.1111/add.14668</pub-id><pub-id pub-id-type="medline">31111591</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="web"><article-title>Trends &#x0026; statistics: costs of substance abuse</article-title><source>NIH National Institute on Drug Abuse</source><year>2020</year><month>06</month><comment><ext-link ext-link-type="uri" xlink:href="https://nida.nih.gov/research-topics/trends-statistics">https://nida.nih.gov/research-topics/trends-statistics</ext-link></comment></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="book"><person-group person-group-type="author"><collab>Substance Abuse and Mental Health Services 
Administration (US); Office of the Surgeon General (US)</collab></person-group><article-title>Vision for the future: A public health approach</article-title><source>Facing Addiction in America: The Surgeon General&#x2019;s Report on Alcohol, Drugs, and Health</source><year>2016</year><access-date>2024-12-15</access-date><publisher-name>US Department of Health and Human Services</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.ncbi.nlm.nih.gov/books/NBK424861/">https://www.ncbi.nlm.nih.gov/books/NBK424861/</ext-link></comment></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Giansanti</surname><given-names>D</given-names> </name></person-group><article-title>Artificial intelligence in public health: current trends and future possibilities</article-title><source>Int J Environ Res Public Health</source><year>2022</year><month>09</month><day>21</day><volume>19</volume><issue>19</issue><fpage>11907</fpage><pub-id pub-id-type="doi">10.3390/ijerph191911907</pub-id><pub-id pub-id-type="medline">36231208</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tudor Car</surname><given-names>L</given-names> </name><name name-style="western"><surname>Dhinagaran</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Kyaw</surname><given-names>BM</given-names> </name><etal/></person-group><article-title>Conversational agents in health care: scoping review and conceptual analysis</article-title><source>J Med Internet Res</source><year>2020</year><month>08</month><day>7</day><volume>22</volume><issue>8</issue><fpage>e17158</fpage><pub-id pub-id-type="doi">10.2196/17158</pub-id><pub-id pub-id-type="medline">32763886</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation 
citation-type="web"><person-group person-group-type="author"><collab>OpenAI</collab></person-group><source>ChatGPT (Mar 14 version) Large language model</source><year>2023</year><access-date>2024-04-15</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://chat.openai.com/chat">https://chat.openai.com/chat</ext-link></comment></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>B</given-names> </name><name name-style="western"><surname>Olds</surname><given-names>T</given-names> </name><name name-style="western"><surname>Brinsley</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Systematic review and meta-analysis of the effectiveness of chatbots on lifestyle behaviours</article-title><source>NPJ Digit Med</source><year>2023</year><month>06</month><day>23</day><volume>6</volume><issue>1</issue><fpage>118</fpage><pub-id pub-id-type="doi">10.1038/s41746-023-00856-1</pub-id><pub-id pub-id-type="medline">37353578</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Lamb</surname><given-names>J</given-names> </name><name name-style="western"><surname>Israelstam</surname><given-names>G</given-names> </name><name name-style="western"><surname>Agarwal</surname><given-names>R</given-names> </name><name name-style="western"><surname>Bakster</surname><given-names>S</given-names> </name></person-group><article-title>Generative AI in healthcare: adoption trends and what&#x2019;s next</article-title><year>2024</year><access-date>2024-11-11</access-date><publisher-name>McKinsey &#x0026; Company</publisher-name><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.mckinsey.com/industries/healthcare/our-insights/generative-ai-in-healthcare-adoption-trends-and-whats-next">https://www.mckinsey.com/industries/healthcare/our-insights/generative-ai-in-healthcare-adoption-trends-and-whats-next</ext-link></comment></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sallam</surname><given-names>M</given-names> </name></person-group><article-title>ChatGPT utility in healthcare education, research, and practice: systematic review on the promising perspectives and valid concerns</article-title><source>Healthcare (Basel)</source><year>2023</year><month>03</month><day>19</day><volume>11</volume><issue>6</issue><fpage>887</fpage><pub-id pub-id-type="doi">10.3390/healthcare11060887</pub-id><pub-id pub-id-type="medline">36981544</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Miner</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Laranjo</surname><given-names>L</given-names> </name><name name-style="western"><surname>Kocaballi</surname><given-names>AB</given-names> </name></person-group><article-title>Chatbots in the fight against the COVID-19 pandemic</article-title><source>NPJ Digit Med</source><year>2020</year><volume>3</volume><fpage>65</fpage><pub-id pub-id-type="doi">10.1038/s41746-020-0280-0</pub-id><pub-id pub-id-type="medline">32377576</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hamdoun</surname><given-names>S</given-names> </name><name name-style="western"><surname>Monteleone</surname><given-names>R</given-names> </name><name name-style="western"><surname>Bookman</surname><given-names>T</given-names> 
</name><name name-style="western"><surname>Michael</surname><given-names>K</given-names> </name><name name-style="western"><surname>Michael</surname><given-names>K</given-names> </name></person-group><article-title>AI-based and digital mental health apps: balancing need and risk</article-title><source>IEEE Technol Soc Mag</source><year>2023</year><volume>42</volume><issue>1</issue><fpage>25</fpage><lpage>36</lpage><pub-id pub-id-type="doi">10.1109/MTS.2023.3241309</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Menz</surname><given-names>BD</given-names> </name><name name-style="western"><surname>Modi</surname><given-names>ND</given-names> </name><name name-style="western"><surname>Sorich</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Hopkins</surname><given-names>AM</given-names> </name></person-group><article-title>Health disinformation use case highlighting the urgent need for artificial intelligence vigilance: weapons of mass disinformation</article-title><source>JAMA Intern Med</source><year>2024</year><month>01</month><day>1</day><volume>184</volume><issue>1</issue><fpage>92</fpage><lpage>96</lpage><pub-id pub-id-type="doi">10.1001/jamainternmed.2023.5947</pub-id><pub-id pub-id-type="medline">37955873</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aljamaan</surname><given-names>F</given-names> </name><name name-style="western"><surname>Temsah</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Altamimi</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Reference hallucination score for medical artificial intelligence chatbots: development and usability study</article-title><source>JMIR Med 
Inform</source><year>2024</year><month>07</month><day>31</day><volume>12</volume><fpage>e54345</fpage><pub-id pub-id-type="doi">10.2196/54345</pub-id><pub-id pub-id-type="medline">39083799</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hosseini</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rasmussen</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Resnik</surname><given-names>DB</given-names> </name></person-group><article-title>Using AI to write scholarly publications</article-title><source>Account Res</source><year>2024</year><month>10</month><volume>31</volume><issue>7</issue><fpage>715</fpage><lpage>723</lpage><pub-id pub-id-type="doi">10.1080/08989621.2023.2168535</pub-id><pub-id pub-id-type="medline">36697395</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>YL</given-names> </name><name name-style="western"><surname>Por</surname><given-names>LY</given-names> </name><name name-style="western"><surname>Ku</surname><given-names>CS</given-names> </name></person-group><article-title>A systematic literature review of information security in chatbots</article-title><source>Appl Sci (Basel)</source><year>2023</year><volume>13</volume><issue>11</issue><fpage>6355</fpage><pub-id pub-id-type="doi">10.3390/app13116355</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sebastian</surname><given-names>G</given-names> </name></person-group><article-title>Privacy and data protection in ChatGPT and other AI chatbots: 
strategies for securing user information</article-title><source>International Journal of Security and Privacy in Pervasive Computing</source><year>2023</year><volume>15</volume><fpage>1</fpage><lpage>14</lpage><pub-id pub-id-type="doi">10.4018/IJSPPC.325475</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name></person-group><article-title>Security implications of AI chatbots in health care</article-title><source>J Med Internet Res</source><year>2023</year><month>11</month><day>28</day><volume>25</volume><fpage>e47551</fpage><pub-id pub-id-type="doi">10.2196/47551</pub-id><pub-id pub-id-type="medline">38015597</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scheibein</surname><given-names>F</given-names> </name><name name-style="western"><surname>Caballeria</surname><given-names>E</given-names> </name><name name-style="western"><surname>Taher</surname><given-names>MA</given-names> </name><etal/></person-group><article-title>Optimizing digital tools for the field of substance use and substance use disorders: backcasting exercise</article-title><source>JMIR Hum Factors</source><year>2023</year><month>12</month><day>12</day><volume>10</volume><fpage>e46678</fpage><pub-id pub-id-type="doi">10.2196/46678</pub-id><pub-id pub-id-type="medline">38085569</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bull</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ezeanochie</surname><given-names>N</given-names> </name></person-group><article-title>From Foucault to Freire through Facebook: toward an integrated theory of 
mHealth</article-title><source>Health Educ Behav</source><year>2016</year><month>08</month><volume>43</volume><issue>4</issue><fpage>399</fpage><lpage>411</lpage><pub-id pub-id-type="doi">10.1177/1090198115605310</pub-id><pub-id pub-id-type="medline">26384499</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Collins-McNeil</surname><given-names>J</given-names> </name><name name-style="western"><surname>Edwards</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Batch</surname><given-names>BC</given-names> </name><name name-style="western"><surname>Benbow</surname><given-names>D</given-names> </name><name name-style="western"><surname>McDougald</surname><given-names>CS</given-names> </name><name name-style="western"><surname>Sharpe</surname><given-names>D</given-names> </name></person-group><article-title>A culturally targeted self-management program for African Americans with type 2 diabetes mellitus</article-title><source>Can J Nurs Res</source><year>2012</year><month>12</month><access-date>2024-08-10</access-date><volume>44</volume><issue>4</issue><fpage>126</fpage><lpage>141</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://pmc.ncbi.nlm.nih.gov/articles/PMC3667585/">https://pmc.ncbi.nlm.nih.gov/articles/PMC3667585/</ext-link></comment><pub-id pub-id-type="medline">23448079</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tversky</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kahneman</surname><given-names>D</given-names> </name></person-group><article-title>The framing of decisions and the psychology of 
choice</article-title><source>Science</source><year>1981</year><month>01</month><day>30</day><volume>211</volume><issue>4481</issue><fpage>453</fpage><lpage>458</lpage><pub-id pub-id-type="doi">10.1126/science.7455683</pub-id><pub-id pub-id-type="medline">7455683</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Thaler</surname><given-names>RH</given-names> </name><name name-style="western"><surname>Sunstein</surname><given-names>CR</given-names> </name></person-group><source>Nudge: Improving Decisions About Health, Wealth, and Happiness Revised &#x0026; Expanded Edition</source><year>2009</year><publisher-name>Penguin Books</publisher-name></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Berger</surname><given-names>J</given-names> </name></person-group><source>Contagious: Why Things Catch On</source><year>2013</year><access-date>2025-04-25</access-date><publisher-name>Simon &#x0026; Schuster</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://jonahberger.com/books/contagious/">https://jonahberger.com/books/contagious/</ext-link></comment></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Earnshaw</surname><given-names>VA</given-names> </name></person-group><article-title>Stigma and substance use disorders: a clinical, research, and advocacy agenda</article-title><source>Am Psychol</source><year>2020</year><month>12</month><volume>75</volume><issue>9</issue><fpage>1300</fpage><lpage>1311</lpage><pub-id pub-id-type="doi">10.1037/amp0000744</pub-id><pub-id pub-id-type="medline">33382299</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ybarra</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Prescott</surname><given-names>TL</given-names> </name><name name-style="western"><surname>Phillips</surname><given-names>GL</given-names> </name><name name-style="western"><surname>Bull</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Parsons</surname><given-names>JT</given-names> </name><name name-style="western"><surname>Mustanski</surname><given-names>B</given-names> </name></person-group><article-title>Pilot RCT results of an mHealth HIV prevention program for sexual minority male adolescents</article-title><source>Pediatrics</source><year>2017</year><month>07</month><volume>140</volume><issue>1</issue><fpage>e20162999</fpage><pub-id pub-id-type="doi">10.1542/peds.2016-2999</pub-id><pub-id pub-id-type="medline">28659456</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bull</surname><given-names>S</given-names> </name><name name-style="western"><surname>Devine</surname><given-names>S</given-names> </name><name name-style="western"><surname>Schmiege</surname><given-names>SJ</given-names> </name><name name-style="western"><surname>Pickard</surname><given-names>L</given-names> </name><name name-style="western"><surname>Campbell</surname><given-names>J</given-names> </name><name name-style="western"><surname>Shlay</surname><given-names>JC</given-names> </name></person-group><article-title>Text messaging, teen outreach program, and sexual health behavior: a cluster randomized trial</article-title><source>Am J Public Health</source><year>2016</year><month>09</month><volume>106</volume><issue>S1</issue><fpage>S117</fpage><lpage>S124</lpage><pub-id pub-id-type="doi">10.2105/AJPH.2016.303363</pub-id><pub-id 
pub-id-type="medline">27689478</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Domek</surname><given-names>GJ</given-names> </name><name name-style="western"><surname>Contreras-Roldan</surname><given-names>IL</given-names> </name><name name-style="western"><surname>Bull</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Text message reminders to improve infant immunization in Guatemala: a randomized clinical trial</article-title><source>Vaccine (Auckl)</source><year>2019</year><month>09</month><day>30</day><volume>37</volume><issue>42</issue><fpage>6192</fpage><lpage>6200</lpage><pub-id pub-id-type="doi">10.1016/j.vaccine.2019.08.046</pub-id><pub-id pub-id-type="medline">31492475</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="report"><person-group person-group-type="author"><collab>IBM</collab></person-group><article-title>Watson privacy, compliance &#x0026; security proof of value (POV) documentation</article-title><year>2018</year><access-date>2024-12-15</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.ibm.com/watson/assets/duo/pdf/Watson-Privacy-and-Security-POV_final_062819_tps.pdf">https://www.ibm.com/watson/assets/duo/pdf/Watson-Privacy-and-Security-POV_final_062819_tps.pdf</ext-link></comment></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Williamson</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Prybutok</surname><given-names>V</given-names> </name></person-group><article-title>The era of artificial intelligence deception: unraveling the complexities of false realities and emerging threats of 
misinformation</article-title><source>Information</source><year>2024</year><volume>15</volume><issue>6</issue><fpage>299</fpage><pub-id pub-id-type="doi">10.3390/info15060299</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nielsen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Landauer</surname><given-names>TK</given-names> </name></person-group><article-title>A mathematical model of the finding of usability problems</article-title><source>CHI &#x2019;93: Proceedings of the INTERACT &#x2019;93 and CHI &#x2019;93 Conference on Human Factors in Computing Systems</source><year>1993</year><month>05</month><day>1</day><fpage>206</fpage><lpage>213</lpage><pub-id pub-id-type="doi">10.1145/169059.169166</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Beattie</surname><given-names>H</given-names> </name><name name-style="western"><surname>Watkins</surname><given-names>L</given-names> </name><name name-style="western"><surname>Robinson</surname><given-names>WH</given-names> </name><name name-style="western"><surname>Rubin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Watkins</surname><given-names>S</given-names> </name></person-group><article-title>Measuring and mitigating bias in AI-chatbots</article-title><year>2022</year><conf-name>2022 IEEE International Conference on Assured Autonomy (ICAA)</conf-name><conf-loc>Fajardo, PR, USA</conf-loc><fpage>117</fpage><lpage>123</lpage><pub-id pub-id-type="doi">10.1109/ICAA52185.2022.00023</pub-id></nlm-citation></ref></ref-list></back></article>