@inproceedings{baker-gillis-2021-sexism,
title = "Sexism in the Judiciary: The Importance of Bias Definition in {NLP} and In Our Courts",
author = "Baker Gillis, Noa",
editor = "Costa-jussa, Marta and
Gonen, Hila and
Hardmeier, Christian and
Webster, Kellie",
booktitle = "Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.gebnlp-1.6",
doi = "10.18653/v1/2021.gebnlp-1.6",
pages = "45--54",
abstract = "We analyze 6.7 million case law documents to determine the presence of gender bias within our judicial system. We find that current bias detection methods in NLP are insufficient to determine gender bias in our case law database and propose an alternative approach. We show that existing algorithms{'} inconsistent results are consequences of prior research{'}s inconsistent definitions of biases themselves. Bias detection algorithms rely on groups of words to represent bias (e.g., {`}salary,{'} {`}job,{'} and {`}boss{'} to represent employment as a potentially biased theme against women in text). However, the methods to build these groups of words have several weaknesses, primarily that the word lists are based on the researchers{'} own intuitions. We suggest two new methods of automating the creation of word lists to represent biases. We find that our methods outperform current NLP bias detection methods. Our research improves the capabilities of NLP technology to detect bias and highlights gender biases present in influential case law. In order to test our NLP bias detection method{'}s performance, we regress our results of bias in case law against U.S census data of women{'}s participation in the workforce in the last 100 years.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="baker-gillis-2021-sexism">
<titleInfo>
<title>Sexism in the Judiciary: The Importance of Bias Definition in NLP and In Our Courts</title>
</titleInfo>
<name type="personal">
<namePart type="given">Noa</namePart>
<namePart type="family">Baker Gillis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marta</namePart>
<namePart type="family">Costa-jussa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hila</namePart>
<namePart type="family">Gonen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christian</namePart>
<namePart type="family">Hardmeier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kellie</namePart>
<namePart type="family">Webster</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We analyze 6.7 million case law documents to determine the presence of gender bias within our judicial system. We find that current bias detection methods in NLP are insufficient to determine gender bias in our case law database and propose an alternative approach. We show that existing algorithms’ inconsistent results are consequences of prior research’s inconsistent definitions of biases themselves. Bias detection algorithms rely on groups of words to represent bias (e.g., ‘salary,’ ‘job,’ and ‘boss’ to represent employment as a potentially biased theme against women in text). However, the methods to build these groups of words have several weaknesses, primarily that the word lists are based on the researchers’ own intuitions. We suggest two new methods of automating the creation of word lists to represent biases. We find that our methods outperform current NLP bias detection methods. Our research improves the capabilities of NLP technology to detect bias and highlights gender biases present in influential case law. In order to test our NLP bias detection method’s performance, we regress our results of bias in case law against U.S census data of women’s participation in the workforce in the last 100 years.</abstract>
<identifier type="citekey">baker-gillis-2021-sexism</identifier>
<identifier type="doi">10.18653/v1/2021.gebnlp-1.6</identifier>
<location>
<url>https://aclanthology.org/2021.gebnlp-1.6</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>45</start>
<end>54</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Sexism in the Judiciary: The Importance of Bias Definition in NLP and In Our Courts
%A Baker Gillis, Noa
%Y Costa-jussa, Marta
%Y Gonen, Hila
%Y Hardmeier, Christian
%Y Webster, Kellie
%S Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F baker-gillis-2021-sexism
%X We analyze 6.7 million case law documents to determine the presence of gender bias within our judicial system. We find that current bias detection methods in NLP are insufficient to determine gender bias in our case law database and propose an alternative approach. We show that existing algorithms’ inconsistent results are consequences of prior research’s inconsistent definitions of biases themselves. Bias detection algorithms rely on groups of words to represent bias (e.g., ‘salary,’ ‘job,’ and ‘boss’ to represent employment as a potentially biased theme against women in text). However, the methods to build these groups of words have several weaknesses, primarily that the word lists are based on the researchers’ own intuitions. We suggest two new methods of automating the creation of word lists to represent biases. We find that our methods outperform current NLP bias detection methods. Our research improves the capabilities of NLP technology to detect bias and highlights gender biases present in influential case law. In order to test our NLP bias detection method’s performance, we regress our results of bias in case law against U.S. census data of women’s participation in the workforce in the last 100 years.
%R 10.18653/v1/2021.gebnlp-1.6
%U https://aclanthology.org/2021.gebnlp-1.6
%U https://doi.org/10.18653/v1/2021.gebnlp-1.6
%P 45-54
Markdown (Informal)
[Sexism in the Judiciary: The Importance of Bias Definition in NLP and In Our Courts](https://aclanthology.org/2021.gebnlp-1.6) (Baker Gillis, GeBNLP 2021)
ACL
Noa Baker Gillis. 2021. [Sexism in the Judiciary: The Importance of Bias Definition in NLP and In Our Courts](https://aclanthology.org/2021.gebnlp-1.6). In *Proceedings of the 3rd Workshop on Gender Bias in Natural Language Processing*, pages 45–54, Online. Association for Computational Linguistics.
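
The abstract describes the class of methods the paper critiques: bias is measured by comparing a hand-curated theme word list (e.g., ‘salary,’ ‘job,’ ‘boss’) against gendered attribute words in an embedding space. As a rough, WEAT-style illustration of that general approach (not the paper's proposed automated method), here is a minimal Python sketch; the word lists, toy random vectors, and function names are all hypothetical stand-ins, and a real study would load trained embeddings.

```python
# Minimal sketch (assumptions throughout): a word-list-based embedding
# bias score in the spirit of WEAT, NOT the paper's proposed method.
import numpy as np

rng = np.random.default_rng(0)
# Toy 8-dimensional embeddings so the sketch runs standalone;
# substitute vectors trained on a corpus (e.g., case law) in practice.
vocab = ["salary", "job", "boss", "he", "him", "man", "she", "her", "woman"]
emb = {w: rng.normal(size=8) for w in vocab}

def cosine(u, v):
    """Cosine similarity between two embedding vectors."""
    return float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)))

def association(word, male_words, female_words):
    """Mean similarity to male attribute words minus mean similarity
    to female attribute words: positive means closer to the male terms."""
    male = np.mean([cosine(emb[word], emb[m]) for m in male_words])
    female = np.mean([cosine(emb[word], emb[f]) for f in female_words])
    return male - female

theme = ["salary", "job", "boss"]       # researcher-chosen theme word list
male_attrs = ["he", "him", "man"]       # hypothetical attribute lists
female_attrs = ["she", "her", "woman"]

# Average the per-word associations over the theme list to get one score.
score = np.mean([association(w, male_attrs, female_attrs) for w in theme])
print(f"employment-theme gender association: {score:+.3f}")
```

The weakness the abstract highlights lives in the `theme` list: the score is only as meaningful as the researcher's intuition behind those words, which is what motivates the paper's two automated word-list construction methods.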