BibTeX
@article{czarnowska-etal-2021-quantifying,
    title = "Quantifying Social Biases in {NLP}: A Generalization and Empirical Comparison of Extrinsic Fairness Metrics",
    author = "Czarnowska, Paula and
      Vyas, Yogarshi and
      Shah, Kashif",
    editor = "Roark, Brian and
      Nenkova, Ani",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "9",
    year = "2021",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/2021.tacl-1.74",
    doi = "10.1162/tacl_a_00425",
    pages = "1249--1267",
    abstract = "Measuring bias is key for better understanding and addressing unfairness in NLP/ML models. This is often done via fairness metrics, which quantify the differences in a model{'}s behaviour across a range of demographic groups. In this work, we shed more light on the differences and similarities between the fairness metrics used in NLP. First, we unify a broad range of existing metrics under three generalized fairness metrics, revealing the connections between them. Next, we carry out an extensive empirical comparison of existing metrics and demonstrate that the observed differences in bias measurement can be systematically explained via differences in parameter choices for our generalized metrics.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="czarnowska-etal-2021-quantifying">
<titleInfo>
<title>Quantifying Social Biases in NLP: A Generalization and Empirical Comparison of Extrinsic Fairness Metrics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Paula</namePart>
<namePart type="family">Czarnowska</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yogarshi</namePart>
<namePart type="family">Vyas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kashif</namePart>
<namePart type="family">Shah</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Transactions of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
<abstract>Measuring bias is key for better understanding and addressing unfairness in NLP/ML models. This is often done via fairness metrics, which quantify the differences in a model’s behaviour across a range of demographic groups. In this work, we shed more light on the differences and similarities between the fairness metrics used in NLP. First, we unify a broad range of existing metrics under three generalized fairness metrics, revealing the connections between them. Next, we carry out an extensive empirical comparison of existing metrics and demonstrate that the observed differences in bias measurement can be systematically explained via differences in parameter choices for our generalized metrics.</abstract>
<identifier type="citekey">czarnowska-etal-2021-quantifying</identifier>
<identifier type="doi">10.1162/tacl_a_00425</identifier>
<location>
<url>https://aclanthology.org/2021.tacl-1.74</url>
</location>
<part>
<date>2021</date>
<detail type="volume"><number>9</number></detail>
<extent unit="page">
<start>1249</start>
<end>1267</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Journal Article
%T Quantifying Social Biases in NLP: A Generalization and Empirical Comparison of Extrinsic Fairness Metrics
%A Czarnowska, Paula
%A Vyas, Yogarshi
%A Shah, Kashif
%J Transactions of the Association for Computational Linguistics
%D 2021
%V 9
%I MIT Press
%C Cambridge, MA
%F czarnowska-etal-2021-quantifying
%X Measuring bias is key for better understanding and addressing unfairness in NLP/ML models. This is often done via fairness metrics, which quantify the differences in a model’s behaviour across a range of demographic groups. In this work, we shed more light on the differences and similarities between the fairness metrics used in NLP. First, we unify a broad range of existing metrics under three generalized fairness metrics, revealing the connections between them. Next, we carry out an extensive empirical comparison of existing metrics and demonstrate that the observed differences in bias measurement can be systematically explained via differences in parameter choices for our generalized metrics.
%R 10.1162/tacl_a_00425
%U https://aclanthology.org/2021.tacl-1.74
%U https://doi.org/10.1162/tacl_a_00425
%P 1249-1267
Markdown (Informal)
[Quantifying Social Biases in NLP: A Generalization and Empirical Comparison of Extrinsic Fairness Metrics](https://aclanthology.org/2021.tacl-1.74) (Czarnowska et al., TACL 2021)
ACL
Paula Czarnowska, Yogarshi Vyas, and Kashif Shah. 2021. Quantifying Social Biases in NLP: A Generalization and Empirical Comparison of Extrinsic Fairness Metrics. Transactions of the Association for Computational Linguistics, 9:1249–1267.
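The abstract above describes extrinsic fairness metrics as quantifying differences in a model's behaviour across demographic groups. As a rough illustration only, and not the paper's three generalized metrics (whose definitions are given in the paper itself), the Python sketch below computes a per-group score and collapses the group-wise differences into a single bias number via the maximum pairwise gap. The toy data, group names, and max-gap aggregation are all assumptions made for the example.

```python
# Minimal sketch of an extrinsic, group-comparison fairness measurement.
# Illustrative only: not the formulation from Czarnowska et al. (2021).
from itertools import combinations

def per_group_accuracy(labels, preds, groups):
    """Accuracy of a classifier computed separately for each demographic group."""
    scores = {}
    for g in set(groups):
        idx = [i for i, gi in enumerate(groups) if gi == g]
        scores[g] = sum(labels[i] == preds[i] for i in idx) / len(idx)
    return scores

def max_pairwise_gap(scores):
    """Largest absolute difference between per-group scores: one simple way
    to summarize group-wise behavioural differences as a single number."""
    return max(abs(scores[a] - scores[b]) for a, b in combinations(scores, 2))

# Toy data (assumed): gold labels, model predictions, and one group attribute
# per example.
labels = [1, 0, 1, 1, 0, 1, 0, 0]
preds  = [1, 0, 0, 1, 0, 1, 1, 1]
groups = ["A", "A", "A", "A", "B", "B", "B", "B"]

scores = per_group_accuracy(labels, preds, groups)
print(scores)                    # {'A': 0.75, 'B': 0.5} (dict order may vary)
print(max_pairwise_gap(scores))  # 0.25: the accuracy gap between groups A and B
```

Different choices here (which score to compare, which groups to pair, how to aggregate the differences) yield different bias numbers; the paper's point is that many published metrics are exactly such parameter choices of a few generalized comparison schemes.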