BibTeX
@inproceedings{lovenia-etal-2023-one,
title = "Which One Are You Referring To? Multimodal Object Identification in Situated Dialogue",
author = "Lovenia, Holy and
Cahyawijaya, Samuel and
Fung, Pascale",
editor = "Bassignana, Elisa and
Lindemann, Matthias and
Petit, Alban",
booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.eacl-srw.6/",
doi = "10.18653/v1/2023.eacl-srw.6",
pages = "61--72",
    abstract = "The demand for multimodal dialogue systems has been rising in various domains, emphasizing the importance of interpreting multimodal inputs from conversational and situational contexts. One main challenge in multimodal dialogue understanding is multimodal object identification, which constitutes the ability to identify objects relevant to a multimodal user-system conversation. We explore three methods to tackle this problem and evaluate them on the largest situated dialogue dataset, SIMMC 2.1. Our best method, scene-dialogue alignment, improves performance by {\textasciitilde}20{\%} F1-score compared to the SIMMC 2.1 baselines. We provide analysis and discussion regarding the limitations of our methods and potential directions for future work."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="lovenia-etal-2023-one">
    <titleInfo>
      <title>Which One Are You Referring To? Multimodal Object Identification in Situated Dialogue</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Holy</namePart>
      <namePart type="family">Lovenia</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Samuel</namePart>
      <namePart type="family">Cahyawijaya</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Pascale</namePart>
      <namePart type="family">Fung</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Elisa</namePart>
        <namePart type="family">Bassignana</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Matthias</namePart>
        <namePart type="family">Lindemann</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alban</namePart>
        <namePart type="family">Petit</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Dubrovnik, Croatia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The demand for multimodal dialogue systems has been rising in various domains, emphasizing the importance of interpreting multimodal inputs from conversational and situational contexts. One main challenge in multimodal dialogue understanding is multimodal object identification, which constitutes the ability to identify objects relevant to a multimodal user-system conversation. We explore three methods to tackle this problem and evaluate them on the largest situated dialogue dataset, SIMMC 2.1. Our best method, scene-dialogue alignment, improves performance by ~20% F1-score compared to the SIMMC 2.1 baselines. We provide analysis and discussion regarding the limitations of our methods and potential directions for future work.</abstract>
<identifier type="citekey">lovenia-etal-2023-one</identifier>
<identifier type="doi">10.18653/v1/2023.eacl-srw.6</identifier>
<location>
<url>https://aclanthology.org/2023.eacl-srw.6/</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>61</start>
<end>72</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Which One Are You Referring To? Multimodal Object Identification in Situated Dialogue
%A Lovenia, Holy
%A Cahyawijaya, Samuel
%A Fung, Pascale
%Y Bassignana, Elisa
%Y Lindemann, Matthias
%Y Petit, Alban
%S Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F lovenia-etal-2023-one
%X The demand for multimodal dialogue systems has been rising in various domains, emphasizing the importance of interpreting multimodal inputs from conversational and situational contexts. One main challenge in multimodal dialogue understanding is multimodal object identification, which constitutes the ability to identify objects relevant to a multimodal user-system conversation. We explore three methods to tackle this problem and evaluate them on the largest situated dialogue dataset, SIMMC 2.1. Our best method, scene-dialogue alignment, improves performance by ~20% F1-score compared to the SIMMC 2.1 baselines. We provide analysis and discussion regarding the limitations of our methods and potential directions for future work.
%R 10.18653/v1/2023.eacl-srw.6
%U https://aclanthology.org/2023.eacl-srw.6/
%U https://doi.org/10.18653/v1/2023.eacl-srw.6
%P 61-72
Markdown (Informal)
[Which One Are You Referring To? Multimodal Object Identification in Situated Dialogue](https://aclanthology.org/2023.eacl-srw.6/) (Lovenia et al., EACL 2023)
ACL
Holy Lovenia, Samuel Cahyawijaya, and Pascale Fung. 2023. [Which One Are You Referring To? Multimodal Object Identification in Situated Dialogue](https://aclanthology.org/2023.eacl-srw.6/). In *Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop*, pages 61–72, Dubrovnik, Croatia. Association for Computational Linguistics.