@inproceedings{hashimoto-etal-2024-beam,
title = "How Does Beam Search improve Span-Level Confidence Estimation in Generative Sequence Labeling?",
author = "Hashimoto, Kazuma and
Naim, Iftekhar and
Raman, Karthik",
editor = {V{\'a}zquez, Ra{\'u}l and
Celikkanat, Hande and
Ulmer, Dennis and
Tiedemann, J{\"o}rg and
Swayamdipta, Swabha and
Aziz, Wilker and
Plank, Barbara and
Baan, Joris and
de Marneffe, Marie-Catherine},
booktitle = "Proceedings of the 1st Workshop on Uncertainty-Aware NLP (UncertaiNLP 2024)",
month = mar,
year = "2024",
address = "St Julians, Malta",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.uncertainlp-1.6/",
pages = "62--69",
abstract = "Sequence labeling is a core task in text understanding for IE/IR systems. Text generation models have increasingly become the go-to solution for such tasks (e.g., entity extraction and dialog slot filling). While most research has focused on the labeling accuracy, a key aspect {--} of vital practical importance {--} has slipped through the cracks: understanding model confidence. More specifically, we lack a principled understanding of how to reliably gauge the confidence of a model in its predictions for each labeled span. This paper aims to provide some empirical insights on estimating model confidence for generative sequence labeling. Most notably, we find that simply using the decoder's output probabilities \textbf{is not} the best in realizing well-calibrated confidence estimates. As verified over six public datasets of different tasks, we show that our proposed approach {--} which leverages statistics from top-$k$ predictions by a beam search {--} significantly reduces calibration errors of the predictions of a generative sequence labeling model."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hashimoto-etal-2024-beam">
<titleInfo>
<title>How Does Beam Search improve Span-Level Confidence Estimation in Generative Sequence Labeling?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kazuma</namePart>
<namePart type="family">Hashimoto</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Iftekhar</namePart>
<namePart type="family">Naim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Karthik</namePart>
<namePart type="family">Raman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Uncertainty-Aware NLP (UncertaiNLP 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Raúl</namePart>
<namePart type="family">Vázquez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hande</namePart>
<namePart type="family">Celikkanat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dennis</namePart>
<namePart type="family">Ulmer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jörg</namePart>
<namePart type="family">Tiedemann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Swabha</namePart>
<namePart type="family">Swayamdipta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wilker</namePart>
<namePart type="family">Aziz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Barbara</namePart>
<namePart type="family">Plank</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joris</namePart>
<namePart type="family">Baan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marie-Catherine</namePart>
<namePart type="family">de Marneffe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">St Julians, Malta</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Sequence labeling is a core task in text understanding for IE/IR systems. Text generation models have increasingly become the go-to solution for such tasks (e.g., entity extraction and dialog slot filling). While most research has focused on the labeling accuracy, a key aspect – of vital practical importance – has slipped through the cracks: understanding model confidence. More specifically, we lack a principled understanding of how to reliably gauge the confidence of a model in its predictions for each labeled span. This paper aims to provide some empirical insights on estimating model confidence for generative sequence labeling. Most notably, we find that simply using the decoder’s output probabilities is not the best in realizing well-calibrated confidence estimates. As verified over six public datasets of different tasks, we show that our proposed approach – which leverages statistics from top-k predictions by a beam search – significantly reduces calibration errors of the predictions of a generative sequence labeling model.</abstract>
<identifier type="citekey">hashimoto-etal-2024-beam</identifier>
<location>
<url>https://aclanthology.org/2024.uncertainlp-1.6/</url>
</location>
<part>
<date>2024-03</date>
<extent unit="page">
<start>62</start>
<end>69</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T How Does Beam Search improve Span-Level Confidence Estimation in Generative Sequence Labeling?
%A Hashimoto, Kazuma
%A Naim, Iftekhar
%A Raman, Karthik
%Y Vázquez, Raúl
%Y Celikkanat, Hande
%Y Ulmer, Dennis
%Y Tiedemann, Jörg
%Y Swayamdipta, Swabha
%Y Aziz, Wilker
%Y Plank, Barbara
%Y Baan, Joris
%Y de Marneffe, Marie-Catherine
%S Proceedings of the 1st Workshop on Uncertainty-Aware NLP (UncertaiNLP 2024)
%D 2024
%8 March
%I Association for Computational Linguistics
%C St Julians, Malta
%F hashimoto-etal-2024-beam
%X Sequence labeling is a core task in text understanding for IE/IR systems. Text generation models have increasingly become the go-to solution for such tasks (e.g., entity extraction and dialog slot filling). While most research has focused on the labeling accuracy, a key aspect – of vital practical importance – has slipped through the cracks: understanding model confidence. More specifically, we lack a principled understanding of how to reliably gauge the confidence of a model in its predictions for each labeled span. This paper aims to provide some empirical insights on estimating model confidence for generative sequence labeling. Most notably, we find that simply using the decoder’s output probabilities is not the best in realizing well-calibrated confidence estimates. As verified over six public datasets of different tasks, we show that our proposed approach – which leverages statistics from top-k predictions by a beam search – significantly reduces calibration errors of the predictions of a generative sequence labeling model.
%U https://aclanthology.org/2024.uncertainlp-1.6/
%P 62-69
Markdown (Informal)
[How Does Beam Search improve Span-Level Confidence Estimation in Generative Sequence Labeling?](https://aclanthology.org/2024.uncertainlp-1.6/) (Hashimoto et al., UncertaiNLP 2024)
ACL