<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR Human Factors</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Hum Factors</journal-id>
      <journal-title>JMIR Human Factors</journal-title>
      <issn pub-type="epub">2292-9495</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i1e35358</article-id>
      <article-id pub-id-type="pmid">35348468</article-id>
      <article-id pub-id-type="doi">10.2196/35358</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>The Acceptability of Virtual Characters as Social Skills Trainers: Usability Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Kushniruk</surname>
            <given-names>Andre</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Okada</surname>
            <given-names>Shogo</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chollet</surname>
            <given-names>Mathieu</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chaudhry</surname>
            <given-names>Beenish</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Tanaka</surname>
            <given-names>Hiroki</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Division of Information Science</institution>
            <institution>Nara Institute of Science and Technology</institution>
            <addr-line>Takayamacho 8916-5</addr-line>
            <addr-line>Ikoma-shi, 630-0192</addr-line>
            <country>Japan</country>
            <phone>81 9076493408</phone>
            <email>hiroki-tan@is.naist.jp</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0548-6252</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Nakamura</surname>
            <given-names>Satoshi</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6956-3803</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Division of Information Science</institution>
        <institution>Nara Institute of Science and Technology</institution>
        <addr-line>Ikoma-shi</addr-line>
        <country>Japan</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Data Science Center</institution>
        <institution>Nara Institute of Science and Technology</institution>
        <addr-line>Ikoma-shi</addr-line>
        <country>Japan</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Hiroki Tanaka <email>hiroki-tan@is.naist.jp</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <season>Jan-Mar</season>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>29</day>
        <month>3</month>
        <year>2022</year>
      </pub-date>
      <volume>9</volume>
      <issue>1</issue>
      <elocation-id>e35358</elocation-id>
      <history>
        <date date-type="received">
          <day>2</day>
          <month>12</month>
          <year>2021</year>
        </date>
        <date date-type="rev-request">
          <day>20</day>
          <month>1</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>30</day>
          <month>1</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>21</day>
          <month>2</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Hiroki Tanaka, Satoshi Nakamura. Originally published in JMIR Human Factors (https://humanfactors.jmir.org), 29.03.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Human Factors, is properly cited. The complete bibliographic information, a link to the original publication on https://humanfactors.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://humanfactors.jmir.org/2022/1/e35358" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Social skills training by human trainers is a well-established method to provide appropriate social interaction skills and strengthen social self-efficacy. In our previous work, we attempted to automate social skills training by developing a virtual agent that taught social skills through interaction. Previous research has not investigated the visual design of virtual agents for social skills training. Thus, we investigated the effect of virtual agent visual design on automated social skills training.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The 3 main purposes of this research were to investigate the effect of virtual agent appearance on automated social skills training, the relationship between acceptability and other measures (eg, likeability, realism, and familiarity), and the relationship between likeability and individual user characteristics (eg, gender, age, and autistic traits).</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We prepared images and videos of a virtual agent, and 1218 crowdsourced workers rated the virtual agents through a questionnaire. In designing personalized virtual agents, we investigated the acceptability, likeability, and other impressions of the virtual agents and their relationship to individual characteristics.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>We found that there were differences between the virtual agents in all measures (<italic>P</italic>&#60;.001). A female anime-type virtual agent was rated as the most likeable. We also confirmed that participants’ gender, age, and autistic traits were related to their ratings.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>We confirmed the effect of virtual agent design on automated social skills training. Our findings are important in designing the appearance of an agent for use in personalized automated social skills training.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>social skills training</kwd>
        <kwd>virtual agent design</kwd>
        <kwd>virtual assistant</kwd>
        <kwd>virtual trainer</kwd>
        <kwd>chatbot</kwd>
        <kwd>acceptability</kwd>
        <kwd>realism</kwd>
        <kwd>virtual agent</kwd>
        <kwd>simulation</kwd>
        <kwd>social skill</kwd>
        <kwd>social interaction</kwd>
        <kwd>design</kwd>
        <kwd>training</kwd>
        <kwd>crowdsourcing</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Social skills training is a method widely applied to help people who lack social skills. It is used in medical hospitals, employment support facilities, workplaces, schools, and various other institutions [<xref ref-type="bibr" rid="ref1">1</xref>]. Social skills training is generally conducted by a human trainer to promote appropriate social interaction skills and strengthen social self-efficacy [<xref ref-type="bibr" rid="ref2">2</xref>]. The Bellack method (or step-by-step social skills training) is a well-structured and widely used evidence-based approach [<xref ref-type="bibr" rid="ref1">1</xref>]. It is a cognitive behavioral approach to social skills training inspired by the 5 core principles of social learning theory: modeling, shaping, reinforcement, overlearning, and generalization [<xref ref-type="bibr" rid="ref3">3</xref>]. The Bellack method defines the social skills training framework and its 4 basic skills: expressing positive feelings, listening to others, making requests, and expressing unpleasant feelings. These skills are beneficial for all people (not only those with autistic traits or schizophrenia) [<xref ref-type="bibr" rid="ref1">1</xref>]. In particular, autism spectrum disorder (ASD) is a spectrum condition [<xref ref-type="bibr" rid="ref4">4</xref>], meaning it has a broad range of characteristics, from mild to severe. Using computer agents in social skills training is motivated by the fact that even though some people with high-functioning autism experience difficulty during social communication, they also show good or even superior systemizing skills [<xref ref-type="bibr" rid="ref5">5</xref>]. Systemizing is the drive to analyze or build systems and understand and predict behavior in terms of underlying rules and regularities. 
The use of systematic computer-based training for people who need to improve their social skills has the following benefits: (1) it uses a computerized environment that is predictable, consistent, and free from social demands; (2) users can work at their own pace and level of understanding; (3) training can be repeated until the goal is achieved; and (4) interest and motivation can be maintained through computerized rewards. It may also be easier for those who suffer from social difficulties to use computer agents than to directly interact with humans [<xref ref-type="bibr" rid="ref6">6</xref>]. A past paper suggested that people with social difficulties such as ASD feel safer and more comfortable in virtual interactions than in interactions with actual people [<xref ref-type="bibr" rid="ref7">7</xref>].</p>
      <p>We and other research groups have been conducting studies to automate social skills training using virtual agents, and this work has led to the development of automatic social skills training [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref12">12</xref>] that by design resembles human-led social skills training [<xref ref-type="bibr" rid="ref10">10</xref>]. The use of conversational agents in health care was reviewed by Tudor Car et al and Milne-Ives et al [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. Among types of conversational agents, our system includes video modeling of human behavior, real-time behavior recognition, and feedback. We previously confirmed the effectiveness of this training in children and adults with ASD and in the general population. The automated social skills training agent plays 2 roles: as a trainer and as a listener. We confirmed that the system was more effective in training social skills than the traditional methods of reading books or watching videos of role models, and that talking to a 3D virtual agent made users feel more comfortable and less tense than talking to a human [<xref ref-type="bibr" rid="ref15">15</xref>]. Automated social skills training targets various populations, from children to adult men and women, as well as those with ASD or schizophrenia [<xref ref-type="bibr" rid="ref1">1</xref>]. However, visual designs of virtual agents, and what kind of design is more favored or more accepted, have not yet been investigated. 
A previous study showed that the quality of the therapeutic alliance (ie, the level of rapport and trust) is a reliable predictor of positive clinical outcomes independent of the approach to psychotherapy (including social skills training [<xref ref-type="bibr" rid="ref1">1</xref>] and cognitive behavioral therapy [<xref ref-type="bibr" rid="ref16">16</xref>]) or the specific outcome measure [<xref ref-type="bibr" rid="ref17">17</xref>]. For automatic social skills training to be adopted and accepted by individuals, detailed investigation is necessary. In this study, we focus on comparing virtual agents with varying visual designs, rather than comparing humans and robots [<xref ref-type="bibr" rid="ref18">18</xref>] for assistive technology [<xref ref-type="bibr" rid="ref19">19</xref>], because we consider that the design of virtual agents is easier to create and modify.</p>
      <p>The visual design of the virtual agent in social skills training has been previously investigated, although not exhaustively. For example, Hoque et al [<xref ref-type="bibr" rid="ref12">12</xref>] paired male participants with a male coach and female participants with a female coach in order to minimize gender-based variability in behavior. By contrast, Tanaka et al [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref15">15</xref>] did not consider the agent’s gender (they used only a female design). Previous studies have used various virtual agent designs for different tasks and compared their appearance and behavior [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>], realism [<xref ref-type="bibr" rid="ref23">23</xref>], intensity in dialogue scenarios, and the appropriateness of body and eye proportions [<xref ref-type="bibr" rid="ref24">24</xref>]. Past studies have also created a voice designed for the elderly [<xref ref-type="bibr" rid="ref25">25</xref>] and have examined the impact of gender and race on users’ self-efficacy [<xref ref-type="bibr" rid="ref26">26</xref>]. Troncone et al [<xref ref-type="bibr" rid="ref27">27</xref>] discussed seniors’ psychological perspectives in terms of the model of acceptance and associated factors. Our study applies these findings and rating measures to investigate the design of our virtual agents, aiming to create a more favorable and acceptable design for automated social skills training. To the best of our knowledge, previous work has not investigated the visual design of virtual agents for automated social skills training, the relationship between acceptability and other measures, and the relationship between likeability and individual user characteristics.</p>
      <p>This study set Japanese adults as our target users. We prepared a variety of new virtual agent designs for social skills training and evaluated them with multiple items on a questionnaire: their acceptability as a trainer; their acceptability as a listener; their realism, familiarity, trustworthiness, and eeriness; the likeability of their face, eyes, hair, perceived age, and voice; and their overall impression. These criteria were chosen with reference to the studies of Esposito et al [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref25">25</xref>] and Ring et al [<xref ref-type="bibr" rid="ref24">24</xref>]. We followed their statistical analysis framework and investigated the appearance of 3D characters in the context of automated social skills training. First, we evaluated virtual agent visual design, particularly realism. Previous work has shown that serious tasks, such as medical diagnosis, require realistic agents; on the other hand, anime-like agents are more suited to social chitchat-like dialogue systems [<xref ref-type="bibr" rid="ref24">24</xref>]. We hypothesized that anime-like characters would be preferable and more accepted for automated social skills training since such training requires friendly characteristics to maintain participant safety, and because agents play 2 roles: as trainers and as listeners. In addition, realism is affected by the “uncanny valley” phenomenon, with the most unrealistic character often being rated as the most acceptable [<xref ref-type="bibr" rid="ref23">23</xref>]. We hypothesized that we would find that the uncanny valley also applies to automated social skills training agents. Second, to examine new factors that correlate to acceptability, we quantified the relationships between acceptability and other measures. 
We hypothesized that these questionnaire items would be highly correlated with each other [<xref ref-type="bibr" rid="ref23">23</xref>]. Finally, we investigated the differences in preference for virtual agent design by considering individual users’ gender, age, and autistic traits in order to enable personalized automated social skills training. The 3 main research problems were (1) to investigate the visual appearance of virtual agents for automated social skills training; (2) to investigate the relationship between acceptability and other measures (eg, likeability, face, voice, realism, and familiarity); and (3) to investigate the relationship between acceptability and the individual characteristics of the user (ie, gender, age, and autistic traits).</p>
      <p>This paper is an extension of conference proceedings [<xref ref-type="bibr" rid="ref28">28</xref>] in which we reported on the visual design of characters. This paper adds an analysis of realism and includes a greater number of participants. We created new agents and videos and evaluated their realism. We also investigated whether people with high or low autistic traits rated the likeability of virtual agents differently depending on the realism of the agent. We also analyzed the correlation matrix between all questionnaire items in order to confirm correlations between acceptability and other measures. Finally, this paper discusses and summarizes findings from a series of experiments.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Visual Design of Virtual Agents</title>
        <p>We first prepared an illustration of a virtual agent, as shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>. The virtual agents were designed by a company specializing in Japanese animation.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Images of the 9 virtual characters and representative measures collected from data set 1 and data set 2.</p>
          </caption>
          <graphic xlink:href="humanfactors_v9i1e35358_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>All characters faced the front with no emotional expression. Characters A and B (female) and C and D (male) were designed with a consistent age, with only the degree of their realism and gender changing. Character E was an inanimate object created for use with children. Character F was a nonhuman animal (a dog), also for use with children. For character G, we created a realistic 3D model similar in appearance to characters A and B and took a screen capture from the front. Character H was the default agent provided by the Greta platform (developed by Pelachaud et al) [<xref ref-type="bibr" rid="ref29">29</xref>], which is an embodied conversation agent that can be created with the Autodesk character generator (Autodesk Inc.) [<xref ref-type="bibr" rid="ref30">30</xref>]. Character H was intended for use mainly with French- and English-speaking users. Character I was designed for Japanese female users. In the current study of automated social skills training, character I was selected as the virtual character [<xref ref-type="bibr" rid="ref10">10</xref>]. The representation of characters H and I was created by taking a screen capture from the front.</p>
        <p>The sentence “Hello, let’s practice communication together” was embedded in the image with both a male and a female voice. The utterance was 5 seconds in length and spoken by Google Text-to-Speech. Characters E and F were created with higher-pitched voices than those used for normal female speech synthesis, to mimic children’s voices.</p>
        <p>Since 3D models were available for characters H and I, we were also able to create videos for them in Greta (<xref rid="figure2" ref-type="fig">Figure 2</xref>). Movements and gestures were added, such as the character raising its hands or putting its hands on its chest, synchronized to the speech content. These same behaviors and synchronization for characters H and I were also generated in Japanese, with an utterance length of 8 seconds. The speech synthesis used the voice of the character “Yuki” in CereProc (CereProc Ltd.).</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Screen captures of videos of two of the virtual characters (A) and representative measures collected from data set 3 (B, C).</p>
          </caption>
          <graphic xlink:href="humanfactors_v9i1e35358_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>We further analyzed the effect of realism by designing additional virtual agents, also with the aid of a design company specializing in Japanese animation. These agents were designed using the Maya tool (Autodesk Inc.). We prepared 6 levels of realism, following a previously reported method [<xref ref-type="bibr" rid="ref23">23</xref>]. The degrees of realism were as follows: (1) pencil toon, (2) flat toon, (3) shaded toon, (4) bare toon, (5) computer-generated toon, and (6) human (with subsurface scattering), as shown in <xref rid="figure3" ref-type="fig">Figure 3</xref>. The same behavior was generated for these 6 agents, with Japanese speech synthesis and lip-synching using the same words as described above. All of these images and movies are available upon request to the first author.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Screen captures of the 6 virtual agents (A); acceptability as a trainer in data set 4 (error bars represent SE) (B); and the evaluation of likeability by high and low SRS score groups (C).</p>
          </caption>
          <graphic xlink:href="humanfactors_v9i1e35358_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Participants</title>
        <p>For data collection, we recruited participants from a crowdsourcing service (Crowdworks). The recruitment notice asked for participants 18 years of age or older with Japanese nationality. In order to divide the task among the participants, data were collected in 4 separate data sets with different participants. Data set 1 had 305 participants (with a male to female ratio of 148 to 157), data set 2 had 301 participants (with a male to female ratio of 131 to 170), data set 3 had 302 participants (with a male to female ratio of 145 to 157), and data set 4 had 305 participants (with a male to female ratio of 145 to 160). All data sets can be found in the multimedia appendix. Data set 1 was used to investigate image acceptability, likeability, familiarity, likeability of certain elements (ie, eyes, face, hair, voice, and perceived age), autistic traits, and alexithymia. Data set 2 was used to investigate realism, trustworthiness, and eeriness of the agents. Data set 3 was used to investigate the videos with characters H and I. Data set 4 was used to investigate the realism of the movies, as well as autistic traits. For the validation to have a sufficient sample size, we collected a larger sample size for each data set compared to previous works, which have recruited around 40 to 70 participants from regional communities [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>] or have used Amazon’s Mechanical Turk platform [<xref ref-type="bibr" rid="ref24">24</xref>]. In this study, we also performed a grouped analysis using 45 years as the threshold for high and low age groups (high age: n=84; low age: n=21).</p>
      </sec>
      <sec>
        <title>Autistic Traits</title>
        <p>In data set 1 and data set 4, we used the adult version of the Social Responsiveness Scale-2 (SRS) [<xref ref-type="bibr" rid="ref31">31</xref>] to assess autistic traits. This measures how many autistic traits an individual shows and can be used across the general population, not only with people who are suspected of having ASD. In data set 1, we measured the Toronto Alexithymia Scale-20 (TAS) [<xref ref-type="bibr" rid="ref32">32</xref>] to assess alexithymia. In both cases, we calculated the total score. We did not calculate subscales in this study. In data set 1, the 2 questionnaires had a Spearman correlation coefficient of 0.67 (<italic>P</italic>&#60;.001), which indicates a high correlation between autistic traits and alexithymia. We are currently planning a future analysis that will use SRS as a measure of autistic traits. In this study, we used a cutoff value of 81 points [<xref ref-type="bibr" rid="ref33">33</xref>] as the threshold for high and low SRS score (subjects with a high SRS score: n=113; low SRS score: n=192). We also measured SRS scores in data set 4 and also set a threshold for high and low SRS scores in that data set (high: n=129, low: n=177).</p>
      </sec>
      <sec>
        <title>Measures</title>
        <p>Questionnaire items and scales were prepared with reference to studies by Esposito et al and Ring et al [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. The questionnaire items measured the acceptability of the agent as a trainer and as a listener; its realism, familiarity, trustworthiness, and eeriness; the likeability of its face, eyes, hair, perceived age, and voice; and its overall impression. Each question was answered through a Google Form. In data set 1, each question item was answered after completing the SRS and TAS. In data set 3, in addition to the above, we added the likeability of the clothes the agent wore, because the video included the entire upper body of the virtual agent. We asked the participants to read a description of the concept of social skills training (in particular, the function of a virtual agent to train the user’s social communication skills and also listen to the user). We performed a preliminary test with a few adults to check whether the participants understood the social skills training, and we wrote instructions. Participants first looked at a set of all the images (<xref rid="figure1" ref-type="fig">Figure 1</xref>) to get an impression of all the virtual agents, and they then watched the individual virtual agents and answered each question. The questions were evaluated with a 5-point Likert scale (from 1, “I don’t think so at all,” to 5 “I think so very much”). Spearman ρ was calculated to determine the relationship between the questionnaire items.</p>
        <p>R (R Foundation for Statistical Computing) was used for the analysis. Since normality could not be confirmed in the ratings of the questions by the Kolmogorov-Smirnov test, the Kruskal-Wallis test was used to examine the differences between the virtual agents. In the analysis for each group of gender, age, and SRS, we calculated the effect size (<italic>r</italic>). We report the top 3 combinations of <italic>r</italic> from all combinations of virtual agents and questionnaire items. Furthermore, we performed the Wilcoxon signed-rank test to compare pairs of factors.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>This was an anonymous study in which the participants enrolled themselves by registering through Crowdworks and agreeing to participate in the study. Since participation was anonymized, the study was exempt from registration with our institutional review board.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>In reporting the results, we did not report all measures, in order to focus on significant findings. The following is a summary of the experimental results.</p>
      <p>First, we examined the differences in ratings between the virtual characters. The Kruskal-Wallis test confirmed that there were significant differences between the virtual characters in all measures (<italic>P</italic>&#60;.001). Regarding realism, the distribution was as expected in the original design: character A was more realistic than character B and character G was the most realistic. The most preferred virtual character among the participants was character B, averaging 3.29 (SD 1.0) (<xref rid="figure1" ref-type="fig">Figure 1</xref>). Character B was also highly evaluated in other questionnaire items. We also found that the male characters, C and D, and the nonhuman characters, E and F, had lower likeability than character B, and that character H had less likeability and less familiarity.</p>
      <p>Next, we examined the correlations between questionnaire items. <xref rid="figure4" ref-type="fig">Figure 4</xref> shows the correlation matrix. There was a high correlation between face and preference (ρ=0.78, <italic>P</italic>&#60;.001). There was also a high correlation between acceptance as a trainer and acceptance as a listener (ρ=0.80, <italic>P</italic>&#60;.001). On the other hand, although a significant difference was confirmed regarding voice preference and other questionnaire items, the correlation coefficient was relatively low.</p>
      <p><xref ref-type="table" rid="table1">Table 1</xref> lists the top 3 combinations of virtual agents and questionnaire items that had the highest effect size (<italic>r</italic>) for gender, age, and SRS score. All cases with a statistically significant difference are listed in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. Male subjects evaluated character G’s face, overall likeability, and acceptability as a trainer more highly than did female subjects. The higher age group evaluated character I’s eyes and face more highly than did the lower age group. The high SRS score group evaluated the likeability of character G’s eyes and hair more highly than did the low SRS score group.</p>
      <p><xref rid="figure2" ref-type="fig">Figure 2</xref> shows a comparison of the videos of characters H and I, indicating that acceptability and familiarity were significantly greater for character I than H (all <italic>P</italic>&#60;.001).</p>
      <p><xref rid="figure5" ref-type="fig">Figure 5</xref> shows the overall rating for realism for the agents shown in <xref rid="figure3" ref-type="fig">Figure 3</xref>. The Kruskal-Wallis test confirmed that the virtual agents differed significantly in realism (<italic>P</italic>&#60;.001) and confirmed our design assumption that agents 1 through 6 would have increasingly greater realism. <xref rid="figure3" ref-type="fig">Figure 3</xref> shows the acceptability as a trainer and likeability of the virtual agents. The Kruskal-Wallis test confirmed that the virtual agents differed significantly in all measures (<italic>P</italic>&#60;.001). We found a small difference between the high and low SRS score groups in their evaluation of likeability (<xref rid="figure3" ref-type="fig">Figure 3</xref> lower right), but the Wilcoxon rank-sum test showed no significant difference (for character 1, <italic>P</italic>=.13 and for character 6, <italic>P</italic>=.25) and a small effect size.</p>
      <fig id="figure4" position="float">
        <label>Figure 4</label>
        <caption>
          <p>Correlation matrix of measures.</p>
        </caption>
        <graphic xlink:href="humanfactors_v9i1e35358_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <table-wrap position="float" id="table1">
        <label>Table 1</label>
        <caption>
          <p>Relationship between questionnaire items and gender, age, and SRS score.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="30"/>
          <col width="250"/>
          <col width="240"/>
          <col width="240"/>
          <col width="240"/>
          <thead>
            <tr valign="top">
              <td colspan="2">User characteristic</td>
              <td>Questionnaire item</td>
              <td><italic>r</italic> (<italic>P</italic> value)</td>
              <td>Trend</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td colspan="5">
                <bold>Gender</bold>
              </td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Character G</td>
              <td>Face</td>
              <td>0.29 (&#60;.001)</td>
              <td>Male &#62; female</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Character G</td>
              <td>Likeability</td>
              <td>0.25 (&#60;.001)</td>
              <td>Male &#62; female</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Character G</td>
              <td>Trainer</td>
              <td>0.25 (&#60;.001)</td>
              <td>Male &#62; female</td>
            </tr>
            <tr valign="top">
              <td colspan="5">
                <bold>Age</bold>
              </td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Character I</td>
              <td>Eyes</td>
              <td>0.21 (&#60;.001)</td>
              <td>High &#62; low</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Character I</td>
              <td>Face</td>
              <td>0.19 (&#60;.001)</td>
              <td>High &#62; low</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Character A</td>
              <td>Listener</td>
              <td>0.17 (.003)</td>
              <td>High &#60; low</td>
            </tr>
            <tr valign="top">
              <td colspan="5">
                <bold>SRS score</bold>
              </td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Character G</td>
              <td>Eyes</td>
              <td>0.19 (.001)</td>
              <td>High &#62; low</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Character G</td>
              <td>Hair</td>
              <td>0.18 (.002)</td>
              <td>High &#62; low</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Character G</td>
              <td>Face</td>
              <td>0.16 (.009)</td>
              <td>High &#62; low</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <fig id="figure5" position="float">
        <label>Figure 5</label>
        <caption>
          <p>Realism measures collected from data set 4. Error bars represent SE.</p>
        </caption>
        <graphic xlink:href="humanfactors_v9i1e35358_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>The objective of this study was to examine virtual agent visual design for automated social skills training, the relationship between acceptability and other measures, and the relationship between likeability and individual user characteristics. We also investigated the acceptability and likeability of the virtual agents, as well as various other measures. We were able to confirm that the virtual agents had different ratings. First, we found that the realism of the virtual agent design could be controlled through the selection of characters A, B, or G. We found that character B, originally designed as an anime-like teenage female character, was the most likable (<xref rid="figure1" ref-type="fig">Figure 1</xref>). Since Japanese people are rather accustomed to watching anime-like videos, familiarity with such characters is high. The anime art form, having originated in Japan in the early 1900s, is a uniquely stylized form of 2D and 3D illustration [<xref ref-type="bibr" rid="ref34">34</xref>]. Such a female anime-like character was also integrated and familiarized in our previous research on automated social skills training [<xref ref-type="bibr" rid="ref8">8</xref>]. On the other hand, other virtual characters, such as the inanimate object (character E) or the animal (character F), as well as characters G and H, were less accepted and were not preferred.</p>
      <p>We found significant correlations between questionnaire items (<italic>P</italic>&#60;.001) and a high correlation between face and preference. This face factor influenced the development of the automated social skills training. There was also a high correlation between acceptance as a trainer and acceptance as a listener (<xref rid="figure4" ref-type="fig">Figure 4</xref>). In this case, we could not confirm the difference between the role as trainer and that as listener, because no continuous interactive dialogue was available. When the roles of virtual characters are more carefully chosen in the future, we assume that an investigation of this issue will also be necessary. Since the same voice was used for each virtual character, the correlation coefficient was relatively low. Therefore, we should explore the effect of voice using a variety of speech synthesizers in the future.</p>
      <p>We also found very similar tendencies in video versions of the training agents. However, in terms of familiarity, we confirmed that the rating for the video version of character H was higher than its image version due to the addition of naturalistic movement. Regarding the videos shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>, acceptability and familiarity were significantly greater for character I than H. This shows that Japanese users preferred the anime-like character I over the original Greta character H.</p>
      <p>We also found that realism, as shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>, was associated with acceptability and likeability (<xref rid="figure3" ref-type="fig">Figure 3</xref>), a finding that is similar to that reported by McDonnell et al [<xref ref-type="bibr" rid="ref23">23</xref>]. This may be related to the uncanny valley effect [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>] and represent an intermediary between the responses to characters 3 (shaded toon) and 4 (bare toon). Although the most highly evaluated agent was character 6, the human with subsurface scattering, this sort of agent may need high-quality 3D modeling for its appearance and movement to be natural enough for use in automated social skills training. Thus, the second-ranked character, character 3 (shaded toon), may be the most promising for a realistic virtual agent for automated social skills training.</p>
      <p>We found that the female virtual character, character G, was rated as more preferred by male participants. In addition, since we confirmed that character B was also significantly highly rated by male participants, it appears that the male participants rated female virtual characters as more preferable. Character B, originally designed as an anime-like teenage female character, was judged the most likable by all participants. We found that character I was preferred by older participants. Since character I was designed to appear relatively older (and was originally designed for participants in their 40s), it seems that the older group rated characters closer to their own age as more trustworthy. Therefore, when developing automated social skills training for older users, character I might be the most appropriate type of visual design. In this paper, one of our goals was to analyze the effect of autistic traits. We found that autistic traits were strongly associated with alexithymia (Spearman ρ=0.67). Thus, we focused only on SRS score to measure autistic traits. Our results showed that people with high autistic traits had a preference for realistic agents. We also confirmed that the group with high autistic traits gave a high rating to characters G and H in data set 1. This is a similar finding to previous work [<xref ref-type="bibr" rid="ref18">18</xref>]. However, we did not find a difference in the case of data set 4.</p>
      <p>Further investigation is needed to examine altered cognition in autism and its effects in order to conduct a comparison of virtual agents and real human agents. Although the target population of this study was adults 18 years or older, children with ASD may prefer nonhuman virtual agents, such as trains [<xref ref-type="bibr" rid="ref5">5</xref>]. We must consider the effects of virtual agents in younger users. In future work, we hope to examine the effect of cultural differences, younger age, and virtual agent facial expressions on acceptability. These features could be used as variables of interest. In addition, this study did not confirm whether crowdsourced workers have sufficient knowledge of social skills training. Consequently, we need to investigate the effects of integrating design into an interactive social skills training dialogue system [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>].</p>
      <sec>
        <title>Conclusions</title>
        <p>In this study, we prepared various new virtual agent visual designs for social skills training and evaluated the designs based on multiple questionnaire items that assessed likeability, acceptability, realism, familiarity, and trustworthiness, among other factors, in a study sample of 1218 crowdsourced evaluators. We tested differences in preferences for virtual agent visual designs based on the gender, age, and autistic traits of the participants, in order to create personalized virtual agents. We found that our participants preferred, perhaps through familiarity, anime-like characters, likely because Japanese people are rather accustomed to watching anime-like videos. Our conclusion for implementing an optimal virtual agent for use with Japanese users is generally to design a female anime-type agent (especially a toon-shaded type), which has been shown to be favored and acceptable. We also found that preferences for virtual agent visual design differed according to user gender, age, and autistic traits. For example, we confirmed that users with high autistic traits showed a high preference for virtual agents with a realistic appearance.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>All cases with a statistically significant difference.</p>
        <media xlink:href="humanfactors_v9i1e35358_app1.docx" xlink:title="DOCX File , 14 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Data set 1 to 4.</p>
        <media xlink:href="humanfactors_v9i1e35358_app2.zip" xlink:title="ZIP File  (Zip Archive), 58 KB"/>
      </supplementary-material>
    </app-group>
    <ack>
      <p>This work was funded by Japan Science and Technology Agency CREST (grant JPMJCR19M5) and Japan Society for the Promotion of Science KAKENHI (grants JP17H06101 and JP18K11437).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bellack</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Mueser</surname>
              <given-names>KT</given-names>
            </name>
            <name name-style="western">
              <surname>Gingerich</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Agresta</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <source>Social Skills Training for Schizophrenia: A Step-by-Step Guide</source>
          <year>1997</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Guilford Press</publisher-name>
          <fpage>637</fpage>
          <lpage>638</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liberman</surname>
              <given-names>R P</given-names>
            </name>
            <name name-style="western">
              <surname>Mueser</surname>
              <given-names>K T</given-names>
            </name>
            <name name-style="western">
              <surname>Wallace</surname>
              <given-names>C J</given-names>
            </name>
          </person-group>
          <article-title>Social skills training for schizophrenic individuals at risk for relapse</article-title>
          <source>Am J Psychiatry</source>
          <year>1986</year>
          <month>04</month>
          <volume>143</volume>
          <issue>4</issue>
          <fpage>523</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1176/ajp.143.4.523</pub-id>
          <pub-id pub-id-type="medline">2869704</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Skinner</surname>
              <given-names>BF</given-names>
            </name>
          </person-group>
          <source>Science And Human Behavior</source>
          <year>1953</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Macmillan</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frith</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Happé</surname>
              <given-names>Francesca</given-names>
            </name>
          </person-group>
          <article-title>Autism spectrum disorder</article-title>
          <source>Curr Biol</source>
          <year>2005</year>
          <month>10</month>
          <day>11</day>
          <volume>15</volume>
          <issue>19</issue>
          <fpage>R786</fpage>
          <lpage>90</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0960-9822(05)01102-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.cub.2005.09.033</pub-id>
          <pub-id pub-id-type="medline">16213805</pub-id>
          <pub-id pub-id-type="pii">S0960-9822(05)01102-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Golan</surname>
              <given-names>Ofer</given-names>
            </name>
            <name name-style="western">
              <surname>Baron-Cohen</surname>
              <given-names>Simon</given-names>
            </name>
          </person-group>
          <article-title>Systemizing empathy: teaching adults with Asperger syndrome or high-functioning autism to recognize complex emotions using interactive multimedia</article-title>
          <source>Dev Psychopathol</source>
          <year>2006</year>
          <volume>18</volume>
          <issue>2</issue>
          <fpage>591</fpage>
          <lpage>617</lpage>
          <pub-id pub-id-type="doi">10.1017/S0954579406060305</pub-id>
          <pub-id pub-id-type="medline">16600069</pub-id>
          <pub-id pub-id-type="pii">S0954579406060305</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tanaka</surname>
              <given-names>Hiroki</given-names>
            </name>
            <name name-style="western">
              <surname>Negoro</surname>
              <given-names>Hideki</given-names>
            </name>
            <name name-style="western">
              <surname>Iwasaka</surname>
              <given-names>Hidemi</given-names>
            </name>
            <name name-style="western">
              <surname>Nakamura</surname>
              <given-names>Satoshi</given-names>
            </name>
          </person-group>
          <article-title>Embodied conversational agents for multimodal automated social skills training in people with autism spectrum disorders</article-title>
          <source>PLoS One</source>
          <year>2017</year>
          <volume>12</volume>
          <issue>8</issue>
          <fpage>e0182151</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0182151"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0182151</pub-id>
          <pub-id pub-id-type="medline">28796781</pub-id>
          <pub-id pub-id-type="pii">PONE-D-17-04936</pub-id>
          <pub-id pub-id-type="pmcid">PMC5552034</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Poyade</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Morris</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Portela</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Using mobile virtual reality to empower people with hidden disabilities to overcome their barriers</article-title>
          <year>2017</year>
          <conf-name>The 19th ACM International Conference on Multimodal Interaction</conf-name>
          <conf-date>Nov 13-17, 2017</conf-date>
          <conf-loc>Glasgow, Scotland</conf-loc>
          <fpage>504</fpage>
          <lpage>505</lpage>
          <pub-id pub-id-type="doi">10.1145/3136755.3143025</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tanaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Negoro</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Iwasaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nakamura</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Embodied conversational agents for multimodal automated social skills training in people with autism spectrum disorders</article-title>
          <source>PLoS One</source>
          <year>2017</year>
          <month>8</month>
          <day>10</day>
          <volume>12</volume>
          <issue>8</issue>
          <fpage>e0182151</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0182151"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0182151</pub-id>
          <pub-id pub-id-type="medline">28796781</pub-id>
          <pub-id pub-id-type="pii">PONE-D-17-04936</pub-id>
          <pub-id pub-id-type="pmcid">PMC5552034</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tanaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Iwasaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Negoro</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nakamura</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Analysis of conversational listening skills toward agent-based social skills training</article-title>
          <source>J Multimodal User Interfaces</source>
          <year>2019</year>
          <month>10</month>
          <day>16</day>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>73</fpage>
          <lpage>82</lpage>
          <pub-id pub-id-type="doi">10.1007/s12193-019-00313-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tanaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Iwasaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Matsuda</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Okazaki</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Nakamura</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Analyzing Self-Efficacy and Summary Feedback in Automated Social Skills Training</article-title>
          <source>IEEE Open J. Eng. Med. Biol</source>
          <year>2021</year>
          <volume>2</volume>
          <fpage>65</fpage>
          <lpage>70</lpage>
          <pub-id pub-id-type="doi">10.1109/ojemb.2021.3075567</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Rasazi</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Mamun</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Langevin</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Rawassizadeh</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Schubert</surname>
              <given-names>LK</given-names>
            </name>
            <name name-style="western">
              <surname>Hoque</surname>
              <given-names>ME</given-names>
            </name>
          </person-group>
          <article-title>A Virtual Conversational Agent for Teens with Autism: Experimental Results and Design Lessons</article-title>
          <source>CoRR</source>
          <year>2018</year>
          <fpage>E</fpage>
          <pub-id pub-id-type="doi">10.1145/3383652.3423900</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hoque</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Courgeon</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>J-C</given-names>
            </name>
            <name name-style="western">
              <surname>Mutlu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Picard</surname>
              <given-names>RW</given-names>
            </name>
          </person-group>
          <article-title>MACH: my automated conversation coach, UbiComp, pp</article-title>
          <year>2013</year>
          <conf-name>the 2013 ACM international joint conference on pervasive and ubiquitous computing (UbiComp '13)</conf-name>
          <conf-date>Sep 8-12, 2013</conf-date>
          <conf-loc>Zurich, Switzerland</conf-loc>
          <pub-id pub-id-type="doi">10.1145/2493432.2493502</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tudor Car</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Dhinagaran</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Kyaw</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Kowatsch</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Joty</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Theng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Atun</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Conversational Agents in Health Care: Scoping Review and Conceptual Analysis</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>08</month>
          <day>07</day>
          <volume>22</volume>
          <issue>8</issue>
          <fpage>e17158</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/8/e17158/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/17158</pub-id>
          <pub-id pub-id-type="medline">32763886</pub-id>
          <pub-id pub-id-type="pii">v22i8e17158</pub-id>
          <pub-id pub-id-type="pmcid">PMC7442948</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Milne-Ives</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>de Cock</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Shehadeh</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>de Pennington</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mole</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Normando</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Meinert</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>The Effectiveness of Artificial Intelligence Conversational Agents in Health Care: Systematic Review</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>10</month>
          <day>22</day>
          <volume>22</volume>
          <issue>10</issue>
          <fpage>e20346</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/10/e20346/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/20346</pub-id>
          <pub-id pub-id-type="medline">33090118</pub-id>
          <pub-id pub-id-type="pii">v22i10e20346</pub-id>
          <pub-id pub-id-type="pmcid">PMC7644372</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tanaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Sakriani</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Neubig</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Toda</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Negoro</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Iwasaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nakamura</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Teaching Social Communication Skills Through Human-Agent Interaction</article-title>
          <source>ACM Trans. Interact. Intell. Syst</source>
          <year>2016</year>
          <month>08</month>
          <day>03</day>
          <volume>6</volume>
          <issue>2</issue>
          <fpage>1</fpage>
          <lpage>26</lpage>
          <pub-id pub-id-type="doi">10.1145/2937757</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shidara</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Tanaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Adachi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kanayama</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Sakagami</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kudo</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nakamura</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Automatic Thoughts and Facial Expressions in Cognitive Restructuring With Virtual Agents</article-title>
          <source>Front. Comput. Sci</source>
          <year>2022</year>
          <month>2</month>
          <day>2</day>
          <volume>4</volume>
          <pub-id pub-id-type="doi">10.3389/fcomp.2022.762424</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ardito</surname>
              <given-names>RB</given-names>
            </name>
            <name name-style="western">
              <surname>Rabellino</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Therapeutic alliance and outcome of psychotherapy: historical excursus, measurements, and prospects for research</article-title>
          <source>Front Psychol</source>
          <year>2011</year>
          <volume>2</volume>
          <fpage>270</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fpsyg.2011.00270"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpsyg.2011.00270</pub-id>
          <pub-id pub-id-type="medline">22028698</pub-id>
          <pub-id pub-id-type="pmcid">PMC3198542</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kumazaki</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Warren</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Muramatsu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yoshikawa</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Matsumoto</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Miyao</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Nakano</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mizushima</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wakita</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ishiguro</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mimura</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Minabe</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kikuchi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A pilot study for robot appearance preferences among high-functioning individuals with autism spectrum disorder: Implications for therapeutic use</article-title>
          <source>PLoS One</source>
          <year>2017</year>
          <month>10</month>
          <day>13</day>
          <volume>12</volume>
          <issue>10</issue>
          <fpage>e0186581</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0186581"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0186581</pub-id>
          <pub-id pub-id-type="medline">29028837</pub-id>
          <pub-id pub-id-type="pii">PONE-D-17-30493</pub-id>
          <pub-id pub-id-type="pmcid">PMC5640226</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Robins</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Dautenhahn</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Te Boekhorst</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Billard</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Robots as assistive technology - does appearance matter?</article-title>
          <year>2004</year>
          <conf-name>International Workshop on Robot and Human Interactive Communication, 13th IEEE ROMAN</conf-name>
          <conf-date>Sep 22-24, 2004</conf-date>
          <conf-loc>Kurashiki, Japan</conf-loc>
          <fpage>277</fpage>
          <lpage>282</lpage>
          <pub-id pub-id-type="doi">10.1109/roman.2004.1374773</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Esposito</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Amorese</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cuciniello</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Esposito</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Troncone</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ines Torres</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schlögl</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Cordasco</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Seniors' Acceptance of Virtual Humanoid Agents</article-title>
          <source>Italian Forum of Ambient Assisted Living</source>
          <year>2018</year>
          <fpage>429</fpage>
          <pub-id pub-id-type="doi">10.48550/arXiv.2105.00506</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Esposito</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Amorese</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cuciniello</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pica</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Riviello</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Troncone</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cordasco</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Esposito</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Elders Prefer Female Robots with a High Degree of Human Likeness</article-title>
          <year>2019</year>
          <conf-name>IEEE 23rd International Symposium on Consumer Technologies</conf-name>
          <conf-date>Jun 19-21, 2019</conf-date>
          <conf-loc>Ancona, Italy</conf-loc>
          <fpage>243</fpage>
          <lpage>246</lpage>
          <pub-id pub-id-type="doi">10.1109/isce.2019.8900983</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Terada</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Jing</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yamada</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Effects of Agent Appearance on Customer Buying Motivations on Online Shopping Sites</article-title>
          <year>2015</year>
          <conf-name>the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems</conf-name>
          <conf-date>2015</conf-date>
          <conf-loc>Seoul, Korea</conf-loc>
          <fpage>929</fpage>
          <lpage>934</lpage>
          <pub-id pub-id-type="doi">10.1145/2702613.2732798</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McDonnell</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Breidt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bülthoff</surname>
              <given-names>HH</given-names>
            </name>
          </person-group>
          <article-title>Render me real?: investigating the effect of render style on the perception of animated virtual humans</article-title>
          <source>ACM Trans. Graph</source>
          <year>2012</year>
          <month>08</month>
          <day>05</day>
          <volume>31</volume>
          <issue>4</issue>
          <fpage>1</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1145/2185520.2185587</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ring</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Utami</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bickmore</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>The Right Agent for the Job? The Effects of Agent Visual Appearance on Task Domain</article-title>
          <year>2014</year>
          <conf-name>International Conference on Intelligent Virtual Agents</conf-name>
          <conf-date>Aug 27-29, 2014</conf-date>
          <conf-loc>Boston, MA</conf-loc>
          <fpage>374</fpage>
          <lpage>384</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-319-09767-1_49</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Esposito</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Amorese</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cuciniello</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Riviello</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Esposito</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Troncone</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cordasco</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>The Dependability of Voice on Elders' Acceptance of Humanoid Agents</article-title>
          <year>2019</year>
          <conf-name>Interspeech 2019</conf-name>
          <conf-date>Sep 15-19, 2019</conf-date>
          <conf-loc>Graz, Austria</conf-loc>
          <fpage>31</fpage>
          <pub-id pub-id-type="doi">10.21437/interspeech.2019-1734</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baylor</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Pedagogical Agent Design: The Impact of Agent Realism, Gender, Ethnicity, and Instructional Role</article-title>
          <year>2004</year>
          <conf-name>Intelligent Tutoring Systems, 7th International Conference, ITS 2004</conf-name>
          <conf-date>Aug 30-Sep 3, 2004</conf-date>
          <conf-loc>Maceió, Alagoas, Brazil</conf-loc>
          <fpage>592</fpage>
          <pub-id pub-id-type="doi">10.1007/978-3-540-30139-4_56</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Troncone</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Amorese</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cuciniello</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Saturno</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Pugliese</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Cordasco</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Vogel</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Esposito</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Advanced Assistive Technologies for Elderly People: A Psychological Perspective on Seniors’ Needs and Preferences (part A)</article-title>
          <source>ACTA POLYTECH HUNG</source>
          <year>2020</year>
          <volume>17</volume>
          <issue>2</issue>
          <fpage>163</fpage>
          <lpage>189</lpage>
          <pub-id pub-id-type="doi">10.12700/aph.17.2.2020.2.10</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tanaka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nakamura</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Virtual Agent Design for Social Skills Training Considering Autistic Traits</article-title>
          <year>2020</year>
          <conf-name>42nd Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</conf-name>
          <conf-date>Jul 20-24, 2020</conf-date>
          <conf-loc>Montreal, Canada</conf-loc>
          <fpage>4959</fpage>
          <lpage>4962</lpage>
          <pub-id pub-id-type="doi">10.1109/embc46164.2021.9630741</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Poggi</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Pelachaud</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>De Rosis</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Carofiglio</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>De Carolis</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Greta. A Believable Embodied Conversational Agent</article-title>
          <source>Multimodal Intelligent Information Presentation</source>
          <year>2005</year>
          <publisher-loc>Dordrecht</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>3</fpage>
          <lpage>25</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="web">
          <source>Autodesk Character Generator</source>
          <access-date>2022-03-21</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://charactergenerator.autodesk.com/">https://charactergenerator.autodesk.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Constantino</surname>
              <given-names>JN</given-names>
            </name>
          </person-group>
          <article-title>Social Responsiveness Scale - Second Edition (SRS-2), WPS</article-title>
          <source>Social Responsiveness Scale - Second Edition (SRS-2)</source>
          <year>2012</year>
          <publisher-loc>Los Angeles, CA</publisher-loc>
          <publisher-name>Western Psychological Services</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bagby</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Parker</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>GJ</given-names>
            </name>
          </person-group>
          <article-title>The twenty-item Toronto Alexithymia scale—I. Item selection and cross-validation of the factor structure</article-title>
          <source>Journal of Psychosomatic Research</source>
          <year>1994</year>
          <month>1</month>
          <volume>38</volume>
          <issue>1</issue>
          <fpage>23</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1016/0022-3999(94)90005-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bezemer</surname>
              <given-names>ML</given-names>
            </name>
            <name name-style="western">
              <surname>Blijd-Hoogewys</surname>
              <given-names>EMA</given-names>
            </name>
            <name name-style="western">
              <surname>Meek-Heekelaar</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The Predictive Value of the AQ and the SRS-A in the Diagnosis of ASD in Adults in Clinical Practice</article-title>
          <source>J Autism Dev Disord</source>
          <year>2021</year>
          <month>07</month>
          <day>01</day>
          <volume>51</volume>
          <issue>7</issue>
          <fpage>2402</fpage>
          <lpage>2415</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/33001348"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10803-020-04699-7</pub-id>
          <pub-id pub-id-type="medline">33001348</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10803-020-04699-7</pub-id>
          <pub-id pub-id-type="pmcid">PMC8189953</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="web">
          <source>Anime Art Museum</source>
          <access-date>2022-03-21</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://animeartmuseum.org/whatisanimeart/">http://animeartmuseum.org/whatisanimeart/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Koschate</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Potter</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bremner</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Levine</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Overcoming the uncanny valley: Displays of emotions reduce the uncanniness of humanlike robots</article-title>
          <year>2016</year>
          <conf-name>11th ACM/IEEE International Conference on Human-Robot Interaction (HRI)</conf-name>
          <conf-date>Mar 7-10, 2016</conf-date>
          <conf-loc>Christchurch, New Zealand</conf-loc>
          <pub-id pub-id-type="doi">10.1109/hri.2016.7451773</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mori</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The uncanny valley</article-title>
          <source>Energy</source>
          <year>1970</year>
          <volume>7</volume>
          <issue>4</issue>
          <fpage>33</fpage>
          <lpage>35</lpage>
          <pub-id pub-id-type="doi">10.5749/j.ctvtv937f.7</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
