@inproceedings{braunschweiler-etal-2023-evaluating,
title = "Evaluating Large Language Models for Document-grounded Response Generation in Information-Seeking Dialogues",
author = "Braunschweiler, Norbert and
Doddipatla, Rama and
Keizer, Simon and
Stoyanchev, Svetlana",
editor = "Hazarika, Devamanyu and
Tang, Xiangru Robert and
Jin, Di",
booktitle = "Proceedings of the 1st Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants!",
month = sep,
year = "2023",
address = "Prague, Czech Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.tllm-1.5",
pages = "46--55",
abstract = "In this paper, we investigate the use of large language models (LLMs) like ChatGPT for document-grounded response generation in the context of information-seeking dialogues. For evaluation, we use the MultiDoc2Dial corpus of task-oriented dialogues in four social service domains previously used in the DialDoc 2022 Shared Task. Information-seeking dialogue turns are grounded in multiple documents providing relevant information. We generate dialogue completion responses by prompting a ChatGPT model, using two methods: Chat-Completion and LlamaIndex. ChatCompletion uses knowledge from ChatGPT model pre-training while LlamaIndex also extracts relevant information from documents. Observing that document-grounded response generation via LLMs cannot be adequately assessed by automatic evaluation metrics as they are significantly more verbose, we perform a human evaluation where annotators rate the output of the shared task winning system, the two ChatGPT variants outputs, and human responses. While both ChatGPT variants are more likely to include information not present in the relevant segments, possibly including a presence of hallucinations, they are rated higher than both the shared task winning system and human responses.",
}
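For readers who want to see what the two prompting setups from the abstract look like in practice, below is a minimal Python sketch. It is an illustration under stated assumptions, not code from the paper: it assumes the openai (pre-1.0 API) and llama-index (pre-0.10 layout) packages and an OPENAI_API_KEY in the environment, and the dialogue text, model name, and document directory are hypothetical placeholders.

```python
# Sketch of the two response-generation methods compared in the paper.
# Assumes: openai < 1.0, llama-index < 0.10, OPENAI_API_KEY set.
# The dialogue history, model choice, and "grounding_docs/" path are
# illustrative placeholders, not details taken from the paper.
import openai
from llama_index import SimpleDirectoryReader, VectorStoreIndex

dialogue_history = "User: How do I renew my driver's license?"

# Method 1: ChatCompletion -- the model answers from its pre-training
# knowledge alone, prompted with the dialogue history.
chat_response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": dialogue_history},
    ],
)
print(chat_response["choices"][0]["message"]["content"])

# Method 2: LlamaIndex -- relevant passages are first retrieved from the
# grounding documents, then injected into the prompt alongside the query,
# so the response is grounded in the documents rather than pre-training.
documents = SimpleDirectoryReader("grounding_docs/").load_data()
index = VectorStoreIndex.from_documents(documents)
grounded_response = index.as_query_engine().query(dialogue_history)
print(grounded_response)
```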
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="braunschweiler-etal-2023-evaluating">
<titleInfo>
<title>Evaluating Large Language Models for Document-grounded Response Generation in Information-Seeking Dialogues</title>
</titleInfo>
<name type="personal">
<namePart type="given">Norbert</namePart>
<namePart type="family">Braunschweiler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rama</namePart>
<namePart type="family">Doddipatla</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Simon</namePart>
<namePart type="family">Keizer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Svetlana</namePart>
<namePart type="family">Stoyanchev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants!</title>
</titleInfo>
<name type="personal">
<namePart type="given">Devamanyu</namePart>
<namePart type="family">Hazarika</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiangru</namePart>
<namePart type="given">Robert</namePart>
<namePart type="family">Tang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Di</namePart>
<namePart type="family">Jin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Prague, Czech Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>In this paper, we investigate the use of large language models (LLMs) like ChatGPT for document-grounded response generation in the context of information-seeking dialogues. For evaluation, we use the MultiDoc2Dial corpus of task-oriented dialogues in four social service domains, previously used in the DialDoc 2022 Shared Task. Information-seeking dialogue turns are grounded in multiple documents providing relevant information. We generate dialogue completion responses by prompting a ChatGPT model, using two methods: ChatCompletion and LlamaIndex. ChatCompletion uses knowledge from ChatGPT model pre-training, while LlamaIndex also extracts relevant information from the documents. Observing that document-grounded response generation via LLMs cannot be adequately assessed by automatic evaluation metrics, as LLM responses are significantly more verbose, we perform a human evaluation in which annotators rate the output of the shared task winning system, the two ChatGPT variants' outputs, and human responses. While both ChatGPT variants are more likely to include information not present in the relevant document segments, possibly including hallucinations, they are rated higher than both the shared task winning system and the human responses.</abstract>
<identifier type="citekey">braunschweiler-etal-2023-evaluating</identifier>
<location>
<url>https://aclanthology.org/2023.tllm-1.5</url>
</location>
<part>
<date>2023-09</date>
<extent unit="page">
<start>46</start>
<end>55</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Evaluating Large Language Models for Document-grounded Response Generation in Information-Seeking Dialogues
%A Braunschweiler, Norbert
%A Doddipatla, Rama
%A Keizer, Simon
%A Stoyanchev, Svetlana
%Y Hazarika, Devamanyu
%Y Tang, Xiangru Robert
%Y Jin, Di
%S Proceedings of the 1st Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants!
%D 2023
%8 September
%I Association for Computational Linguistics
%C Prague, Czech Republic
%F braunschweiler-etal-2023-evaluating
%X In this paper, we investigate the use of large language models (LLMs) like ChatGPT for document-grounded response generation in the context of information-seeking dialogues. For evaluation, we use the MultiDoc2Dial corpus of task-oriented dialogues in four social service domains, previously used in the DialDoc 2022 Shared Task. Information-seeking dialogue turns are grounded in multiple documents providing relevant information. We generate dialogue completion responses by prompting a ChatGPT model, using two methods: ChatCompletion and LlamaIndex. ChatCompletion uses knowledge from ChatGPT model pre-training, while LlamaIndex also extracts relevant information from the documents. Observing that document-grounded response generation via LLMs cannot be adequately assessed by automatic evaluation metrics, as LLM responses are significantly more verbose, we perform a human evaluation in which annotators rate the output of the shared task winning system, the two ChatGPT variants' outputs, and human responses. While both ChatGPT variants are more likely to include information not present in the relevant document segments, possibly including hallucinations, they are rated higher than both the shared task winning system and the human responses.
%U https://aclanthology.org/2023.tllm-1.5
%P 46-55
Markdown (Informal)
[Evaluating Large Language Models for Document-grounded Response Generation in Information-Seeking Dialogues](https://aclanthology.org/2023.tllm-1.5) (Braunschweiler et al., TLLM-WS 2023)
ACL
Norbert Braunschweiler, Rama Doddipatla, Simon Keizer, and Svetlana Stoyanchev. 2023. Evaluating Large Language Models for Document-grounded Response Generation in Information-Seeking Dialogues. In Proceedings of the 1st Workshop on Taming Large Language Models: Controllability in the era of Interactive Assistants!, pages 46–55, Prague, Czech Republic. Association for Computational Linguistics.