@inproceedings{liu-etal-2023-enhancing-multilingual,
title = "Enhancing Multilingual Document-Grounded Dialogue Using Cascaded Prompt-Based Post-Training Models",
author = "Liu, Jun and
Cheng, Shuang and
Zhou, Zineng and
Gu, Yang and
Ye, Jian and
Luo, Haiyong",
editor = "Muresan, Smaranda and
Chen, Vivian and
Kennington, Casey and
Vandyke, David and
Dethlefs, Nina and
Inoue, Koji and
Ekstedt, Erik and
Ultes, Stefan",
booktitle = "Proceedings of the Third DialDoc Workshop on Document-grounded Dialogue and Conversational Question Answering",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.dialdoc-1.5",
doi = "10.18653/v1/2023.dialdoc-1.5",
pages = "44--51",
abstract = "The Dialdoc23 shared task presents a Multilingual Document-Grounded Dialogue Systems (MDGDS) challenge, where system responses are generated in multiple languages using user{'}s queries, historical dialogue records and relevant passages. A major challenge for this task is the limited training data available in low-resource languages such as French and Vietnamese. In this paper, we propose Cascaded Prompt-based Post-training Models, dividing the task into three subtasks: Retrieval, Reranking and Generation. We conduct post-training on high-resource language such as English and Chinese to enhance performance of low-resource languages by using the similarities of languages. Additionally, we utilize the prompt method to activate model{'}s ability on diverse languages within the dialogue domain and explore which prompt is a good prompt. Our comprehensive experiments demonstrate the effectiveness of our proposed methods, which achieved the first place on the leaderboard with a total score of 215.40 in token-level F1, SacreBleu, and Rouge-L metrics.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liu-etal-2023-enhancing-multilingual">
<titleInfo>
<title>Enhancing Multilingual Document-Grounded Dialogue Using Cascaded Prompt-Based Post-Training Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jun</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shuang</namePart>
<namePart type="family">Cheng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zineng</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Gu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jian</namePart>
<namePart type="family">Ye</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haiyong</namePart>
<namePart type="family">Luo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third DialDoc Workshop on Document-grounded Dialogue and Conversational Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Smaranda</namePart>
<namePart type="family">Muresan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivian</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kennington</namePart>
<namePart type="family">Casey</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vandyke</namePart>
<namePart type="family">David</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dethlefs</namePart>
<namePart type="family">Nina</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Inoue</namePart>
<namePart type="family">Koji</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekstedt</namePart>
<namePart type="family">Erik</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ultes</namePart>
<namePart type="family">Stefan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Toronto, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The DialDoc23 shared task presents a Multilingual Document-Grounded Dialogue Systems (MDGDS) challenge, where system responses are generated in multiple languages using the user’s queries, historical dialogue records, and relevant passages. A major challenge for this task is the limited training data available in low-resource languages such as French and Vietnamese. In this paper, we propose Cascaded Prompt-based Post-training Models, dividing the task into three subtasks: Retrieval, Reranking, and Generation. We conduct post-training on high-resource languages such as English and Chinese to enhance the performance of low-resource languages by exploiting similarities between languages. Additionally, we use the prompt method to activate the model’s ability in diverse languages within the dialogue domain and explore what makes a good prompt. Our comprehensive experiments demonstrate the effectiveness of the proposed methods, which achieved first place on the leaderboard with a total score of 215.40 across token-level F1, SacreBLEU, and ROUGE-L metrics.</abstract>
<identifier type="citekey">liu-etal-2023-enhancing-multilingual</identifier>
<identifier type="doi">10.18653/v1/2023.dialdoc-1.5</identifier>
<location>
<url>https://aclanthology.org/2023.dialdoc-1.5</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>44</start>
<end>51</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Enhancing Multilingual Document-Grounded Dialogue Using Cascaded Prompt-Based Post-Training Models
%A Liu, Jun
%A Cheng, Shuang
%A Zhou, Zineng
%A Gu, Yang
%A Ye, Jian
%A Luo, Haiyong
%Y Muresan, Smaranda
%Y Chen, Vivian
%Y Kennington, Casey
%Y Vandyke, David
%Y Dethlefs, Nina
%Y Inoue, Koji
%Y Ekstedt, Erik
%Y Ultes, Stefan
%S Proceedings of the Third DialDoc Workshop on Document-grounded Dialogue and Conversational Question Answering
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F liu-etal-2023-enhancing-multilingual
%X The DialDoc23 shared task presents a Multilingual Document-Grounded Dialogue Systems (MDGDS) challenge, where system responses are generated in multiple languages using the user’s queries, historical dialogue records, and relevant passages. A major challenge for this task is the limited training data available in low-resource languages such as French and Vietnamese. In this paper, we propose Cascaded Prompt-based Post-training Models, dividing the task into three subtasks: Retrieval, Reranking, and Generation. We conduct post-training on high-resource languages such as English and Chinese to enhance the performance of low-resource languages by exploiting similarities between languages. Additionally, we use the prompt method to activate the model’s ability in diverse languages within the dialogue domain and explore what makes a good prompt. Our comprehensive experiments demonstrate the effectiveness of the proposed methods, which achieved first place on the leaderboard with a total score of 215.40 across token-level F1, SacreBLEU, and ROUGE-L metrics.
%R 10.18653/v1/2023.dialdoc-1.5
%U https://aclanthology.org/2023.dialdoc-1.5
%U https://doi.org/10.18653/v1/2023.dialdoc-1.5
%P 44-51
[Enhancing Multilingual Document-Grounded Dialogue Using Cascaded Prompt-Based Post-Training Models](https://aclanthology.org/2023.dialdoc-1.5) (Liu et al., dialdoc 2023)