@inproceedings{jung-etal-2022-emotion,
title = "An Emotion-based {K}orean Multimodal Empathetic Dialogue System",
author = "Jung, Minyoung and
Lim, Yeongbeom and
Kim, San and
Jang, Jin Yea and
Shin, Saim and
Lee, Ki-Hoon",
editor = "Wu, Xianchao and
Ruan, Peiying and
Li, Sheng and
Dong, Yi",
booktitle = "Proceedings of the Second Workshop on When Creative AI Meets Conversational AI",
month = oct,
year = "2022",
address = "Gyeongju, Republic of Korea",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.cai-1.3",
pages = "16--22",
abstract = "We propose a Korean multimodal dialogue system targeting emotion-based empathetic dialogues because most research in this field has been conducted in a few languages such as English and Japanese and in certain circumstances. Our dialogue system consists of an emotion detector, an empathetic response generator, a monitoring interface, a voice activity detector, a speech recognizer, a speech synthesizer, a gesture classification, and several controllers to provide both multimodality and empathy during a conversation between a human and a machine. For comparisons across visual influence on users, our dialogue system contains two versions of the user interface, a cat face-based user interface and an avatar-based user interface. We evaluated our dialogue system by investigating the dialogues in text and the average mean opinion scores under three different visual conditions, no visual, the cat face-based, and the avatar-based expressions. The experimental results stand for the importance of adequate visual expressions according to user utterances.",
}
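As a quick programmatic check, the BibTeX record above can be loaded with the third-party bibtexparser package (v1 API); the sketch below is illustrative only, with the entry abbreviated to a few fields.

import bibtexparser  # third-party: pip install bibtexparser

# Abbreviated copy of the record above; illustrative only.
bibtex_record = r"""
@inproceedings{jung-etal-2022-emotion,
    title = "An Emotion-based {K}orean Multimodal Empathetic Dialogue System",
    year = "2022",
    pages = "16--22",
}
"""

db = bibtexparser.loads(bibtex_record)  # returns a BibDatabase
entry = db.entries[0]                   # each entry is a plain dict
print(entry["ID"], entry["pages"])      # -> jung-etal-2022-emotion 16--22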
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jung-etal-2022-emotion">
<titleInfo>
<title>An Emotion-based Korean Multimodal Empathetic Dialogue System</title>
</titleInfo>
<name type="personal">
<namePart type="given">Minyoung</namePart>
<namePart type="family">Jung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yeongbeom</namePart>
<namePart type="family">Lim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">San</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jin</namePart>
<namePart type="given">Yea</namePart>
<namePart type="family">Jang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Saim</namePart>
<namePart type="family">Shin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ki-Hoon</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on When Creative AI Meets Conversational AI</title>
</titleInfo>
<name type="personal">
<namePart type="given">Xianchao</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peiying</namePart>
<namePart type="family">Ruan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sheng</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yi</namePart>
<namePart type="family">Dong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Gyeongju, Republic of Korea</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>We propose a Korean multimodal dialogue system targeting emotion-based empathetic dialogues, as most research in this field has been conducted in only a few languages, such as English and Japanese, and under limited conditions. Our dialogue system consists of an emotion detector, an empathetic response generator, a monitoring interface, a voice activity detector, a speech recognizer, a speech synthesizer, a gesture classifier, and several controllers that together provide both multimodality and empathy during a conversation between a human and a machine. To compare the influence of visual expression on users, our dialogue system offers two versions of the user interface: a cat face-based interface and an avatar-based interface. We evaluated our dialogue system by inspecting the dialogue transcripts and the average mean opinion scores under three visual conditions: no visual expression, cat face-based expressions, and avatar-based expressions. The experimental results demonstrate the importance of visual expressions that are adequate to the user's utterances.</abstract>
<identifier type="citekey">jung-etal-2022-emotion</identifier>
<location>
<url>https://aclanthology.org/2022.cai-1.3</url>
</location>
<part>
<date>2022-10</date>
<extent unit="page">
<start>16</start>
<end>22</end>
</extent>
</part>
</mods>
</modsCollection>
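The MODS record can likewise be queried with Python's standard library alone; a minimal sketch follows, assuming the XML above is saved to a local file (the file name is hypothetical). Note that the MODS default namespace must be mapped explicitly when searching.

import xml.etree.ElementTree as ET

MODS_NS = {"m": "http://www.loc.gov/mods/v3"}

# Hypothetical local copy of the MODS record above.
tree = ET.parse("jung-etal-2022-emotion.xml")
mods = tree.getroot().find("m:mods", MODS_NS)

# Title, then author names as "given [given] family".
title = mods.find("m:titleInfo/m:title", MODS_NS).text
authors = [
    " ".join(part.text for part in name.findall("m:namePart", MODS_NS))
    for name in mods.findall("m:name", MODS_NS)  # direct children only: authors, not the editors nested in relatedItem
]
print(title)
print(authors)  # e.g. ["Minyoung Jung", ..., "Ki-Hoon Lee"]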
%0 Conference Proceedings
%T An Emotion-based Korean Multimodal Empathetic Dialogue System
%A Jung, Minyoung
%A Lim, Yeongbeom
%A Kim, San
%A Jang, Jin Yea
%A Shin, Saim
%A Lee, Ki-Hoon
%Y Wu, Xianchao
%Y Ruan, Peiying
%Y Li, Sheng
%Y Dong, Yi
%S Proceedings of the Second Workshop on When Creative AI Meets Conversational AI
%D 2022
%8 October
%I Association for Computational Linguistics
%C Gyeongju, Republic of Korea
%F jung-etal-2022-emotion
%X We propose a Korean multimodal dialogue system targeting emotion-based empathetic dialogues, as most research in this field has been conducted in only a few languages, such as English and Japanese, and under limited conditions. Our dialogue system consists of an emotion detector, an empathetic response generator, a monitoring interface, a voice activity detector, a speech recognizer, a speech synthesizer, a gesture classifier, and several controllers that together provide both multimodality and empathy during a conversation between a human and a machine. To compare the influence of visual expression on users, our dialogue system offers two versions of the user interface: a cat face-based interface and an avatar-based interface. We evaluated our dialogue system by inspecting the dialogue transcripts and the average mean opinion scores under three visual conditions: no visual expression, cat face-based expressions, and avatar-based expressions. The experimental results demonstrate the importance of visual expressions that are adequate to the user's utterances.
%U https://aclanthology.org/2022.cai-1.3
%P 16-22
Markdown (Informal):
[An Emotion-based Korean Multimodal Empathetic Dialogue System](https://aclanthology.org/2022.cai-1.3) (Jung et al., CAI 2022)

ACL:
Minyoung Jung, Yeongbeom Lim, San Kim, Jin Yea Jang, Saim Shin, and Ki-Hoon Lee. 2022. An Emotion-based Korean Multimodal Empathetic Dialogue System. In Proceedings of the Second Workshop on When Creative AI Meets Conversational AI, pages 16–22, Gyeongju, Republic of Korea. Association for Computational Linguistics.