@inproceedings{balkir-etal-2022-challenges,
  title     = {Challenges in Applying Explainability Methods to Improve the Fairness of {NLP} Models},
  author    = {Balkir, Esma and
               Kiritchenko, Svetlana and
               Nejadgholi, Isar and
               Fraser, Kathleen},
  editor    = {Verma, Apurv and
               Pruksachatkun, Yada and
               Chang, Kai-Wei and
               Galstyan, Aram and
               Dhamala, Jwala and
               Cao, Yang Trista},
  booktitle = {Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)},
  month     = jul,
  year      = {2022},
  address   = {Seattle, U.S.A.},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.trustnlp-1.8/},
  doi       = {10.18653/v1/2022.trustnlp-1.8},
  pages     = {80--92},
  abstract  = {Motivations for methods in explainable artificial intelligence (XAI) often include detecting, quantifying and mitigating bias, and contributing to making machine learning models fairer. However, exactly how an XAI method can help in combating biases is often left unspecified. In this paper, we briefly review trends in explainability and fairness in NLP research, identify the current practices in which explainability methods are applied to detect and mitigate bias, and investigate the barriers preventing XAI methods from being used more widely in tackling fairness issues.},
}
<?xml version="1.0" encoding="UTF-8"?>
<!-- MODS v3 bibliographic record for the same paper as the BibTeX entry above
     (citekey balkir-etal-2022-challenges), as exported by the ACL Anthology
     (see the aclanthology.org URL in the <location> element below).
     NOTE(review): this XML declaration is not at the start of the file, so the
     file as a whole is not a well-formed XML document; extract this section
     before feeding it to an XML parser. -->
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="balkir-etal-2022-challenges">
<titleInfo>
<title>Challenges in Applying Explainability Methods to Improve the Fairness of NLP Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Esma</namePart>
<namePart type="family">Balkir</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Svetlana</namePart>
<namePart type="family">Kiritchenko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isar</namePart>
<namePart type="family">Nejadgholi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kathleen</namePart>
<namePart type="family">Fraser</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<!-- Host item: the workshop proceedings volume containing this paper. -->
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Apurv</namePart>
<namePart type="family">Verma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yada</namePart>
<namePart type="family">Pruksachatkun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kai-Wei</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aram</namePart>
<namePart type="family">Galstyan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jwala</namePart>
<namePart type="family">Dhamala</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="given">Trista</namePart>
<namePart type="family">Cao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, U.S.A.</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Motivations for methods in explainable artificial intelligence (XAI) often include detecting, quantifying and mitigating bias, and contributing to making machine learning models fairer. However, exactly how an XAI method can help in combating biases is often left unspecified. In this paper, we briefly review trends in explainability and fairness in NLP research, identify the current practices in which explainability methods are applied to detect and mitigate bias, and investigate the barriers preventing XAI methods from being used more widely in tackling fairness issues.</abstract>
<identifier type="citekey">balkir-etal-2022-challenges</identifier>
<identifier type="doi">10.18653/v1/2022.trustnlp-1.8</identifier>
<location>
<url>https://aclanthology.org/2022.trustnlp-1.8/</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>80</start>
<end>92</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Challenges in Applying Explainability Methods to Improve the Fairness of NLP Models
%A Balkir, Esma
%A Kiritchenko, Svetlana
%A Nejadgholi, Isar
%A Fraser, Kathleen
%Y Verma, Apurv
%Y Pruksachatkun, Yada
%Y Chang, Kai-Wei
%Y Galstyan, Aram
%Y Dhamala, Jwala
%Y Cao, Yang Trista
%S Proceedings of the 2nd Workshop on Trustworthy Natural Language Processing (TrustNLP 2022)
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, U.S.A.
%F balkir-etal-2022-challenges
%X Motivations for methods in explainable artificial intelligence (XAI) often include detecting, quantifying and mitigating bias, and contributing to making machine learning models fairer. However, exactly how an XAI method can help in combating biases is often left unspecified. In this paper, we briefly review trends in explainability and fairness in NLP research, identify the current practices in which explainability methods are applied to detect and mitigate bias, and investigate the barriers preventing XAI methods from being used more widely in tackling fairness issues.
%R 10.18653/v1/2022.trustnlp-1.8
%U https://aclanthology.org/2022.trustnlp-1.8/
%U https://doi.org/10.18653/v1/2022.trustnlp-1.8
%P 80-92
Markdown (Informal)
[Challenges in Applying Explainability Methods to Improve the Fairness of NLP Models](https://aclanthology.org/2022.trustnlp-1.8/) (Balkir et al., TrustNLP 2022)
ACL