BibTeX
@inproceedings{gor-etal-2021-toward,
    title = "Toward Deconfounding the Effect of Entity Demographics for Question Answering Accuracy",
    author = "Gor, Maharshi and
      Webster, Kellie and
      Boyd-Graber, Jordan",
    editor = "Moens, Marie-Francine and
      Huang, Xuanjing and
      Specia, Lucia and
      Yih, Scott Wen-tau",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.444/",
    doi = "10.18653/v1/2021.emnlp-main.444",
    pages = "5457--5473",
    abstract = "The goal of question answering (QA) is to answer {\_}any{\_} question. However, major QA datasets have skewed distributions over gender, profession, and nationality. Despite that skew, an analysis of model accuracy reveals little evidence that accuracy is lower for people based on gender or nationality; instead, there is more variation on professions (question topic) and question ambiguity. But QA's lack of representation could itself hide evidence of bias, necessitating QA datasets that better represent global diversity."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="gor-etal-2021-toward">
    <titleInfo>
      <title>Toward Deconfounding the Effect of Entity Demographics for Question Answering Accuracy</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Maharshi</namePart>
      <namePart type="family">Gor</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kellie</namePart>
      <namePart type="family">Webster</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jordan</namePart>
      <namePart type="family">Boyd-Graber</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Marie-Francine</namePart>
        <namePart type="family">Moens</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Xuanjing</namePart>
        <namePart type="family">Huang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lucia</namePart>
        <namePart type="family">Specia</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Scott</namePart>
        <namePart type="given">Wen-tau</namePart>
        <namePart type="family">Yih</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The goal of question answering (QA) is to answer _any_ question. However, major QA datasets have skewed distributions over gender, profession, and nationality. Despite that skew, an analysis of model accuracy reveals little evidence that accuracy is lower for people based on gender or nationality; instead, there is more variation on professions (question topic) and question ambiguity. But QA's lack of representation could itself hide evidence of bias, necessitating QA datasets that better represent global diversity.</abstract>
    <identifier type="citekey">gor-etal-2021-toward</identifier>
    <identifier type="doi">10.18653/v1/2021.emnlp-main.444</identifier>
    <location>
      <url>https://aclanthology.org/2021.emnlp-main.444/</url>
    </location>
    <part>
      <date>2021-11</date>
      <extent unit="page">
        <start>5457</start>
        <end>5473</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Toward Deconfounding the Effect of Entity Demographics for Question Answering Accuracy
%A Gor, Maharshi
%A Webster, Kellie
%A Boyd-Graber, Jordan
%Y Moens, Marie-Francine
%Y Huang, Xuanjing
%Y Specia, Lucia
%Y Yih, Scott Wen-tau
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F gor-etal-2021-toward
%X The goal of question answering (QA) is to answer _any_ question. However, major QA datasets have skewed distributions over gender, profession, and nationality. Despite that skew, an analysis of model accuracy reveals little evidence that accuracy is lower for people based on gender or nationality; instead, there is more variation on professions (question topic) and question ambiguity. But QA's lack of representation could itself hide evidence of bias, necessitating QA datasets that better represent global diversity.
%R 10.18653/v1/2021.emnlp-main.444
%U https://aclanthology.org/2021.emnlp-main.444/
%U https://doi.org/10.18653/v1/2021.emnlp-main.444
%P 5457-5473
Markdown (Informal)
[Toward Deconfounding the Effect of Entity Demographics for Question Answering Accuracy](https://aclanthology.org/2021.emnlp-main.444/) (Gor et al., EMNLP 2021)
ACL
Maharshi Gor, Kellie Webster, and Jordan Boyd-Graber. 2021. Toward Deconfounding the Effect of Entity Demographics for Question Answering Accuracy. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 5457–5473, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.