@inproceedings{stolfo-2024-groundedness,
title = "Groundedness in Retrieval-augmented Long-form Generation: An Empirical Study",
author = "Stolfo, Alessandro",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-naacl.100",
doi = "10.18653/v1/2024.findings-naacl.100",
pages = "1537--1552",
abstract = "We present an empirical study of groundedness in long-form question answering (LFQA) by retrieval-augmented large language models (LLMs).In particular, we evaluate whether every generated sentence is grounded in the retrieved documents or the model{'}s pre-training data.Across 3 datasets and 4 model families, our findings reveal that a significant fraction of generated sentences are consistently ungrounded, even when those sentences contain correct ground-truth answers.Additionally, we examine the impacts of factors such as model size, decoding strategy, and instruction tuning on groundedness. Our results show that while larger models tend to ground their outputs more effectively, a significant portion of correct answers remains compromised by hallucinations. This study provides novel insights into the groundedness challenges in LFQA and underscores the necessity for more robust mechanisms in LLMs to mitigate the generation of ungrounded content.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="stolfo-2024-groundedness">
<titleInfo>
<title>Groundedness in Retrieval-augmented Long-form Generation: An Empirical Study</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Stolfo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: NAACL 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Duh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Helena</namePart>
<namePart type="family">Gomez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Bethard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We present an empirical study of groundedness in long-form question answering (LFQA) by retrieval-augmented large language models (LLMs). In particular, we evaluate whether every generated sentence is grounded in the retrieved documents or the model’s pre-training data. Across 3 datasets and 4 model families, our findings reveal that a significant fraction of generated sentences are consistently ungrounded, even when those sentences contain correct ground-truth answers. Additionally, we examine the impacts of factors such as model size, decoding strategy, and instruction tuning on groundedness. Our results show that while larger models tend to ground their outputs more effectively, a significant portion of correct answers remains compromised by hallucinations. This study provides novel insights into the groundedness challenges in LFQA and underscores the necessity for more robust mechanisms in LLMs to mitigate the generation of ungrounded content.</abstract>
<identifier type="citekey">stolfo-2024-groundedness</identifier>
<identifier type="doi">10.18653/v1/2024.findings-naacl.100</identifier>
<location>
<url>https://aclanthology.org/2024.findings-naacl.100</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>1537</start>
<end>1552</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Groundedness in Retrieval-augmented Long-form Generation: An Empirical Study
%A Stolfo, Alessandro
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Findings of the Association for Computational Linguistics: NAACL 2024
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F stolfo-2024-groundedness
%X We present an empirical study of groundedness in long-form question answering (LFQA) by retrieval-augmented large language models (LLMs). In particular, we evaluate whether every generated sentence is grounded in the retrieved documents or the model’s pre-training data. Across 3 datasets and 4 model families, our findings reveal that a significant fraction of generated sentences are consistently ungrounded, even when those sentences contain correct ground-truth answers. Additionally, we examine the impacts of factors such as model size, decoding strategy, and instruction tuning on groundedness. Our results show that while larger models tend to ground their outputs more effectively, a significant portion of correct answers remains compromised by hallucinations. This study provides novel insights into the groundedness challenges in LFQA and underscores the necessity for more robust mechanisms in LLMs to mitigate the generation of ungrounded content.
%R 10.18653/v1/2024.findings-naacl.100
%U https://aclanthology.org/2024.findings-naacl.100
%U https://doi.org/10.18653/v1/2024.findings-naacl.100
%P 1537-1552
Markdown (Informal)
[Groundedness in Retrieval-augmented Long-form Generation: An Empirical Study](https://aclanthology.org/2024.findings-naacl.100) (Stolfo, Findings 2024)
ACL
Alessandro Stolfo. 2024. [Groundedness in Retrieval-augmented Long-form Generation: An Empirical Study](https://aclanthology.org/2024.findings-naacl.100). In *Findings of the Association for Computational Linguistics: NAACL 2024*, pages 1537–1552, Mexico City, Mexico. Association for Computational Linguistics.