@inproceedings{jayaram-allaway-2021-human,
title = "Human Rationales as Attribution Priors for Explainable Stance Detection",
author = "Jayaram, Sahil and
Allaway, Emily",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.emnlp-main.450/",
doi = "10.18653/v1/2021.emnlp-main.450",
pages = "5540--5554",
abstract = "As NLP systems become better at detecting opinions and beliefs from text, it is important to ensure not only that models are accurate but also that they arrive at their predictions in ways that align with human reasoning. In this work, we present a method for imparting human-like rationalization to a stance detection model using crowdsourced annotations on a small fraction of the training data. We show that in a data-scarce setting, our approach can improve the reasoning of a state-of-the-art classifier{---}particularly for inputs containing challenging phenomena such as sarcasm{---}at no cost in predictive performance. Furthermore, we demonstrate that attention weights surpass a leading attribution method in providing faithful explanations of our model`s predictions, thus serving as a computationally cheap and reliable source of attributions for our model."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jayaram-allaway-2021-human">
<titleInfo>
<title>Human Rationales as Attribution Priors for Explainable Stance Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sahil</namePart>
<namePart type="family">Jayaram</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Emily</namePart>
<namePart type="family">Allaway</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marie-Francine</namePart>
<namePart type="family">Moens</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xuanjing</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucia</namePart>
<namePart type="family">Specia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Scott</namePart>
<namePart type="given">Wen-tau</namePart>
<namePart type="family">Yih</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>As NLP systems become better at detecting opinions and beliefs from text, it is important to ensure not only that models are accurate but also that they arrive at their predictions in ways that align with human reasoning. In this work, we present a method for imparting human-like rationalization to a stance detection model using crowdsourced annotations on a small fraction of the training data. We show that in a data-scarce setting, our approach can improve the reasoning of a state-of-the-art classifier—particularly for inputs containing challenging phenomena such as sarcasm—at no cost in predictive performance. Furthermore, we demonstrate that attention weights surpass a leading attribution method in providing faithful explanations of our model's predictions, thus serving as a computationally cheap and reliable source of attributions for our model.</abstract>
<identifier type="citekey">jayaram-allaway-2021-human</identifier>
<identifier type="doi">10.18653/v1/2021.emnlp-main.450</identifier>
<location>
<url>https://aclanthology.org/2021.emnlp-main.450/</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>5540</start>
<end>5554</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Human Rationales as Attribution Priors for Explainable Stance Detection
%A Jayaram, Sahil
%A Allaway, Emily
%Y Moens, Marie-Francine
%Y Huang, Xuanjing
%Y Specia, Lucia
%Y Yih, Scott Wen-tau
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F jayaram-allaway-2021-human
%X As NLP systems become better at detecting opinions and beliefs from text, it is important to ensure not only that models are accurate but also that they arrive at their predictions in ways that align with human reasoning. In this work, we present a method for imparting human-like rationalization to a stance detection model using crowdsourced annotations on a small fraction of the training data. We show that in a data-scarce setting, our approach can improve the reasoning of a state-of-the-art classifier—particularly for inputs containing challenging phenomena such as sarcasm—at no cost in predictive performance. Furthermore, we demonstrate that attention weights surpass a leading attribution method in providing faithful explanations of our model's predictions, thus serving as a computationally cheap and reliable source of attributions for our model.
%R 10.18653/v1/2021.emnlp-main.450
%U https://aclanthology.org/2021.emnlp-main.450/
%U https://doi.org/10.18653/v1/2021.emnlp-main.450
%P 5540-5554
Markdown (Informal)
[Human Rationales as Attribution Priors for Explainable Stance Detection](https://aclanthology.org/2021.emnlp-main.450/) (Jayaram & Allaway, EMNLP 2021)
ACL
Sahil Jayaram and Emily Allaway. 2021. [Human Rationales as Attribution Priors for Explainable Stance Detection](https://aclanthology.org/2021.emnlp-main.450/). In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 5540–5554, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.