@inproceedings{jiang-2024-towards,
title = "Towards a Real-Time Multimodal Emotion Estimation Model for Dialogue Systems",
author = "Jiang, Jingjing",
editor = "Inoue, Koji and
Fu, Yahui and
Axelsson, Agnes and
Ohashi, Atsumoto and
Madureira, Brielen and
Zenimoto, Yuki and
Mohapatra, Biswesh and
Stricker, Armand and
Khosla, Sopan",
booktitle = "Proceedings of the 20th Workshop of Young Researchers' Roundtable on Spoken Dialogue Systems",
month = sep,
year = "2024",
address = "Kyoto, Japan",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.yrrsds-1.22/",
pages = "60--61",
abstract = "This position paper presents my research interest in establishing human-like chat-oriented dialogue systems. To this end, my work focuses on two main areas: the construction and utilization of multimodal datasets and real-time multimodal affective computing. I discuss the limitations of current multimodal dialogue corpora and multimodal affective computing models. As a solution, I have constructed a human-human dialogue dataset containing various synchronized multimodal information, and I have conducted preliminary analyses on it. In future work, I will further analyze the collected data and build a real-time multimodal emotion estimation model for dialogue systems."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="jiang-2024-towards">
    <titleInfo>
      <title>Towards a Real-Time Multimodal Emotion Estimation Model for Dialogue Systems</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Jingjing</namePart>
      <namePart type="family">Jiang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 20th Workshop of Young Researchers’ Roundtable on Spoken Dialogue Systems</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Koji</namePart>
        <namePart type="family">Inoue</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yahui</namePart>
        <namePart type="family">Fu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Agnes</namePart>
        <namePart type="family">Axelsson</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Atsumoto</namePart>
        <namePart type="family">Ohashi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Brielen</namePart>
        <namePart type="family">Madureira</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yuki</namePart>
        <namePart type="family">Zenimoto</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Biswesh</namePart>
        <namePart type="family">Mohapatra</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Armand</namePart>
        <namePart type="family">Stricker</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sopan</namePart>
        <namePart type="family">Khosla</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Kyoto, Japan</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This position paper presents my research interest in establishing human-like chat-oriented dialogue systems. To this end, my work focuses on two main areas: the construction and utilization of multimodal datasets and real-time multimodal affective computing. I discuss the limitations of current multimodal dialogue corpora and multimodal affective computing models. As a solution, I have constructed a human-human dialogue dataset containing various synchronized multimodal information, and I have conducted preliminary analyses on it. In future work, I will further analyze the collected data and build a real-time multimodal emotion estimation model for dialogue systems.</abstract>
    <identifier type="citekey">jiang-2024-towards</identifier>
    <location>
      <url>https://aclanthology.org/2024.yrrsds-1.22/</url>
    </location>
    <part>
      <date>2024-09</date>
      <extent unit="page">
        <start>60</start>
        <end>61</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Towards a Real-Time Multimodal Emotion Estimation Model for Dialogue Systems
%A Jiang, Jingjing
%Y Inoue, Koji
%Y Fu, Yahui
%Y Axelsson, Agnes
%Y Ohashi, Atsumoto
%Y Madureira, Brielen
%Y Zenimoto, Yuki
%Y Mohapatra, Biswesh
%Y Stricker, Armand
%Y Khosla, Sopan
%S Proceedings of the 20th Workshop of Young Researchers’ Roundtable on Spoken Dialogue Systems
%D 2024
%8 September
%I Association for Computational Linguistics
%C Kyoto, Japan
%F jiang-2024-towards
%X This position paper presents my research interest in establishing human-like chat-oriented dialogue systems. To this end, my work focuses on two main areas: the construction and utilization of multimodal datasets and real-time multimodal affective computing. I discuss the limitations of current multimodal dialogue corpora and multimodal affective computing models. As a solution, I have constructed a human-human dialogue dataset containing various synchronized multimodal information, and I have conducted preliminary analyses on it. In future work, I will further analyze the collected data and build a real-time multimodal emotion estimation model for dialogue systems.
%U https://aclanthology.org/2024.yrrsds-1.22/
%P 60-61
Markdown (Informal)
[Towards a Real-Time Multimodal Emotion Estimation Model for Dialogue Systems](https://aclanthology.org/2024.yrrsds-1.22/) (Jiang, YRRSDS 2024)
ACL
Jingjing Jiang. 2024. Towards a Real-Time Multimodal Emotion Estimation Model for Dialogue Systems. In Proceedings of the 20th Workshop of Young Researchers’ Roundtable on Spoken Dialogue Systems, pages 60–61, Kyoto, Japan. Association for Computational Linguistics.