BibTeX
@article{klie-etal-2024-analyzing,
title = "Analyzing Dataset Annotation Quality Management in the Wild",
author = "Klie, Jan-Christoph and
Eckart de Castilho, Richard and
Gurevych, Iryna",
journal = "Computational Linguistics",
volume = "50",
number = "3",
month = sep,
year = "2024",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/2024.cl-3.1/",
doi = "10.1162/coli_a_00516",
pages = "817--866",
abstract = "Data quality is crucial for training accurate, unbiased, and trustworthy machine learning models as well as for their correct evaluation. Recent work, however, has shown that even popular datasets used to train and evaluate state-of-the-art models contain a non-negligible amount of erroneous annotations, biases, or artifacts. While practices and guidelines regarding dataset creation projects exist, to our knowledge, large-scale analysis has yet to be performed on how quality management is conducted when creating natural language datasets and whether these recommendations are followed. Therefore, we first survey and summarize recommended quality management practices for dataset creation as described in the literature and provide suggestions for applying them. Then, we compile a corpus of 591 scientific publications introducing text datasets and annotate it for quality-related aspects, such as annotator management, agreement, adjudication, or data validation. Using these annotations, we then analyze how quality management is conducted in practice. A majority of the annotated publications apply good or excellent quality management. However, we deem the effort of 30{\%} of the studies as only subpar. Our analysis also shows common errors, especially when using inter-annotator agreement and computing annotation error rates."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="klie-etal-2024-analyzing">
<titleInfo>
<title>Analyzing Dataset Annotation Quality Management in the Wild</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jan-Christoph</namePart>
<namePart type="family">Klie</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Richard</namePart>
<namePart type="family">Eckart de Castilho</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Iryna</namePart>
<namePart type="family">Gurevych</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
<abstract>Data quality is crucial for training accurate, unbiased, and trustworthy machine learning models as well as for their correct evaluation. Recent work, however, has shown that even popular datasets used to train and evaluate state-of-the-art models contain a non-negligible amount of erroneous annotations, biases, or artifacts. While practices and guidelines regarding dataset creation projects exist, to our knowledge, large-scale analysis has yet to be performed on how quality management is conducted when creating natural language datasets and whether these recommendations are followed. Therefore, we first survey and summarize recommended quality management practices for dataset creation as described in the literature and provide suggestions for applying them. Then, we compile a corpus of 591 scientific publications introducing text datasets and annotate it for quality-related aspects, such as annotator management, agreement, adjudication, or data validation. Using these annotations, we then analyze how quality management is conducted in practice. A majority of the annotated publications apply good or excellent quality management. However, we deem the effort of 30% of the studies as only subpar. Our analysis also shows common errors, especially when using inter-annotator agreement and computing annotation error rates.</abstract>
<identifier type="citekey">klie-etal-2024-analyzing</identifier>
<identifier type="doi">10.1162/coli_a_00516</identifier>
<location>
<url>https://aclanthology.org/2024.cl-3.1/</url>
</location>
<part>
<date>2024-09</date>
<detail type="volume"><number>50</number></detail>
<detail type="issue"><number>3</number></detail>
<extent unit="page">
<start>817</start>
<end>866</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Journal Article
%T Analyzing Dataset Annotation Quality Management in the Wild
%A Klie, Jan-Christoph
%A Eckart de Castilho, Richard
%A Gurevych, Iryna
%J Computational Linguistics
%D 2024
%8 September
%V 50
%N 3
%I MIT Press
%C Cambridge, MA
%F klie-etal-2024-analyzing
%X Data quality is crucial for training accurate, unbiased, and trustworthy machine learning models as well as for their correct evaluation. Recent work, however, has shown that even popular datasets used to train and evaluate state-of-the-art models contain a non-negligible amount of erroneous annotations, biases, or artifacts. While practices and guidelines regarding dataset creation projects exist, to our knowledge, large-scale analysis has yet to be performed on how quality management is conducted when creating natural language datasets and whether these recommendations are followed. Therefore, we first survey and summarize recommended quality management practices for dataset creation as described in the literature and provide suggestions for applying them. Then, we compile a corpus of 591 scientific publications introducing text datasets and annotate it for quality-related aspects, such as annotator management, agreement, adjudication, or data validation. Using these annotations, we then analyze how quality management is conducted in practice. A majority of the annotated publications apply good or excellent quality management. However, we deem the effort of 30% of the studies as only subpar. Our analysis also shows common errors, especially when using inter-annotator agreement and computing annotation error rates.
%R 10.1162/coli_a_00516
%U https://aclanthology.org/2024.cl-3.1/
%U https://doi.org/10.1162/coli_a_00516
%P 817-866
Markdown (Informal)
[Analyzing Dataset Annotation Quality Management in the Wild](https://aclanthology.org/2024.cl-3.1/) (Klie et al., CL 2024)
ACL
Jan-Christoph Klie, Richard Eckart de Castilho, and Iryna Gurevych. 2024. Analyzing Dataset Annotation Quality Management in the Wild. Computational Linguistics, 50(3):817–866.