@inproceedings{han-tsvetkov-2021-influence-tuning,
    title = "Influence Tuning: Demoting Spurious Correlations via Instance Attribution and Instance-Driven Updates",
    author = "Han, Xiaochuang and
      Tsvetkov, Yulia",
    editor = "Moens, Marie-Francine and
      Huang, Xuanjing and
      Specia, Lucia and
      Yih, Scott Wen-tau",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
    month = nov,
    year = "2021",
    address = "Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.findings-emnlp.374",
    doi = "10.18653/v1/2021.findings-emnlp.374",
    pages = "4398--4409",
abstract = "Among the most critical limitations of deep learning NLP models are their lack of interpretability, and their reliance on spurious correlations. Prior work proposed various approaches to interpreting the black-box models to unveil the spurious correlations, but the research was primarily used in human-computer interaction scenarios. It still remains underexplored whether or how such model interpretations can be used to automatically {``}unlearn{''} confounding features. In this work, we propose influence tuning{---}a procedure that leverages model interpretations to update the model parameters towards a plausible interpretation (rather than an interpretation that relies on spurious patterns in the data) in addition to learning to predict the task labels. We show that in a controlled setup, influence tuning can help deconfounding the model from spurious patterns in data, significantly outperforming baseline methods that use adversarial training.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="han-tsvetkov-2021-influence-tuning">
    <titleInfo>
      <title>Influence Tuning: Demoting Spurious Correlations via Instance Attribution and Instance-Driven Updates</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Xiaochuang</namePart>
      <namePart type="family">Han</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yulia</namePart>
      <namePart type="family">Tsvetkov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2021</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Marie-Francine</namePart>
        <namePart type="family">Moens</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Xuanjing</namePart>
        <namePart type="family">Huang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lucia</namePart>
        <namePart type="family">Specia</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Scott</namePart>
        <namePart type="given">Wen-tau</namePart>
        <namePart type="family">Yih</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Punta Cana, Dominican Republic</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Among the most critical limitations of deep learning NLP models are their lack of interpretability and their reliance on spurious correlations. Prior work proposed various approaches to interpreting black-box models to unveil spurious correlations, but these interpretations were primarily used in human-computer interaction scenarios. It remains underexplored whether or how such model interpretations can be used to automatically “unlearn” confounding features. In this work, we propose influence tuning—a procedure that leverages model interpretations to update the model parameters towards a plausible interpretation (rather than an interpretation that relies on spurious patterns in the data) in addition to learning to predict the task labels. We show that in a controlled setup, influence tuning can help deconfound the model from spurious patterns in the data, significantly outperforming baseline methods that use adversarial training.</abstract>
<identifier type="citekey">han-tsvetkov-2021-influence-tuning</identifier>
<identifier type="doi">10.18653/v1/2021.findings-emnlp.374</identifier>
<location>
<url>https://aclanthology.org/2021.findings-emnlp.374</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>4398</start>
<end>4409</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Influence Tuning: Demoting Spurious Correlations via Instance Attribution and Instance-Driven Updates
%A Han, Xiaochuang
%A Tsvetkov, Yulia
%Y Moens, Marie-Francine
%Y Huang, Xuanjing
%Y Specia, Lucia
%Y Yih, Scott Wen-tau
%S Findings of the Association for Computational Linguistics: EMNLP 2021
%D 2021
%8 November
%I Association for Computational Linguistics
%C Punta Cana, Dominican Republic
%F han-tsvetkov-2021-influence-tuning
%X Among the most critical limitations of deep learning NLP models are their lack of interpretability and their reliance on spurious correlations. Prior work proposed various approaches to interpreting black-box models to unveil spurious correlations, but these interpretations were primarily used in human-computer interaction scenarios. It remains underexplored whether or how such model interpretations can be used to automatically “unlearn” confounding features. In this work, we propose influence tuning—a procedure that leverages model interpretations to update the model parameters towards a plausible interpretation (rather than an interpretation that relies on spurious patterns in the data) in addition to learning to predict the task labels. We show that in a controlled setup, influence tuning can help deconfound the model from spurious patterns in the data, significantly outperforming baseline methods that use adversarial training.
%R 10.18653/v1/2021.findings-emnlp.374
%U https://aclanthology.org/2021.findings-emnlp.374
%U https://doi.org/10.18653/v1/2021.findings-emnlp.374
%P 4398-4409
Markdown (Informal)
[Influence Tuning: Demoting Spurious Correlations via Instance Attribution and Instance-Driven Updates](https://aclanthology.org/2021.findings-emnlp.374) (Han & Tsvetkov, Findings 2021)
ACL
Xiaochuang Han and Yulia Tsvetkov. 2021. Influence Tuning: Demoting Spurious Correlations via Instance Attribution and Instance-Driven Updates. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4398–4409, Punta Cana, Dominican Republic. Association for Computational Linguistics.
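
The abstract describes influence tuning only in prose. For intuition, the following is a minimal, hypothetical PyTorch sketch of what one influence-tuning-style update could look like, assuming a TracIn-style attribution (the influence of one example on another taken as the dot product of their loss gradients) and a pair of training examples known to share only a spurious pattern. The names gradient_dot, influence_tuning_step, and lam are illustrative and not taken from the authors' released code.

import torch
import torch.nn.functional as F

def gradient_dot(model, loss_a, loss_b):
    """TracIn-style attribution: dot product of the gradients of two
    scalar losses with respect to the shared model parameters."""
    params = [p for p in model.parameters() if p.requires_grad]
    grads_a = torch.autograd.grad(loss_a, params, create_graph=True, allow_unused=True)
    grads_b = torch.autograd.grad(loss_b, params, create_graph=True, allow_unused=True)
    return sum((a * b).sum()
               for a, b in zip(grads_a, grads_b)
               if a is not None and b is not None)

def influence_tuning_step(model, optimizer, task_batch, confound_pair, lam=1.0):
    """One joint update: the usual task loss, plus a penalty that pushes
    the mutual influence of two examples sharing only a spurious pattern
    toward zero, so the confound stops driving predictions."""
    x, y = task_batch
    task_loss = F.cross_entropy(model(x), y)

    # (x_a, y_a) and (x_b, y_b) share a confound (e.g., the same surface
    # pattern) but have unrelated labels; both are batched tensors.
    (x_a, y_a), (x_b, y_b) = confound_pair
    loss_a = F.cross_entropy(model(x_a), y_a)
    loss_b = F.cross_entropy(model(x_b), y_b)
    attribution_loss = gradient_dot(model, loss_a, loss_b) ** 2

    loss = task_loss + lam * attribution_loss
    optimizer.zero_grad()
    loss.backward()  # differentiates through the gradient dot product
    optimizer.step()

This collapses everything into a single joint step for brevity; the actual training schedule and the construction of confound-sharing pairs in the paper may differ.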