@inproceedings{zhou-tan-2021-investigating,
    title = "Investigating the Effect of Natural Language Explanations on Out-of-Distribution Generalization in Few-shot {NLI}",
    author = "Zhou, Yangqiaoyu and
      Tan, Chenhao",
    editor = "Sedoc, Jo{\~a}o and
      Rogers, Anna and
      Rumshisky, Anna and
      Tafreshi, Shabnam",
    booktitle = "Proceedings of the Second Workshop on Insights from Negative Results in NLP",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.insights-1.17/",
    doi = "10.18653/v1/2021.insights-1.17",
    pages = "117--124",
    abstract = "Although neural models have shown strong performance in datasets such as SNLI, they lack the ability to generalize out-of-distribution (OOD). In this work, we formulate a few-shot learning setup and examine the effects of natural language explanations on OOD generalization. We leverage the templates in the HANS dataset and construct templated natural language explanations for each template. Although generated explanations show competitive BLEU scores against ground truth explanations, they fail to improve prediction performance. We further show that generated explanations often hallucinate information and miss key elements that indicate the label."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="zhou-tan-2021-investigating">
    <titleInfo>
      <title>Investigating the Effect of Natural Language Explanations on Out-of-Distribution Generalization in Few-shot NLI</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Yangqiaoyu</namePart>
      <namePart type="family">Zhou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Chenhao</namePart>
      <namePart type="family">Tan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Second Workshop on Insights from Negative Results in NLP</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">João</namePart>
        <namePart type="family">Sedoc</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Rogers</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Rumshisky</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Shabnam</namePart>
        <namePart type="family">Tafreshi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Although neural models have shown strong performance in datasets such as SNLI, they lack the ability to generalize out-of-distribution (OOD). In this work, we formulate a few-shot learning setup and examine the effects of natural language explanations on OOD generalization. We leverage the templates in the HANS dataset and construct templated natural language explanations for each template. Although generated explanations show competitive BLEU scores against ground truth explanations, they fail to improve prediction performance. We further show that generated explanations often hallucinate information and miss key elements that indicate the label.</abstract>
    <identifier type="citekey">zhou-tan-2021-investigating</identifier>
    <identifier type="doi">10.18653/v1/2021.insights-1.17</identifier>
    <location>
      <url>https://aclanthology.org/2021.insights-1.17/</url>
    </location>
    <part>
      <date>2021-11</date>
      <extent unit="page">
        <start>117</start>
        <end>124</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Investigating the Effect of Natural Language Explanations on Out-of-Distribution Generalization in Few-shot NLI
%A Zhou, Yangqiaoyu
%A Tan, Chenhao
%Y Sedoc, João
%Y Rogers, Anna
%Y Rumshisky, Anna
%Y Tafreshi, Shabnam
%S Proceedings of the Second Workshop on Insights from Negative Results in NLP
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F zhou-tan-2021-investigating
%X Although neural models have shown strong performance in datasets such as SNLI, they lack the ability to generalize out-of-distribution (OOD). In this work, we formulate a few-shot learning setup and examine the effects of natural language explanations on OOD generalization. We leverage the templates in the HANS dataset and construct templated natural language explanations for each template. Although generated explanations show competitive BLEU scores against ground truth explanations, they fail to improve prediction performance. We further show that generated explanations often hallucinate information and miss key elements that indicate the label.
%R 10.18653/v1/2021.insights-1.17
%U https://aclanthology.org/2021.insights-1.17/
%U https://doi.org/10.18653/v1/2021.insights-1.17
%P 117-124
Markdown (Informal)
[Investigating the Effect of Natural Language Explanations on Out-of-Distribution Generalization in Few-shot NLI](https://aclanthology.org/2021.insights-1.17/) (Zhou & Tan, insights 2021)
ACL
Yangqiaoyu Zhou and Chenhao Tan. 2021. Investigating the Effect of Natural Language Explanations on Out-of-Distribution Generalization in Few-shot NLI. In Proceedings of the Second Workshop on Insights from Negative Results in NLP, pages 117–124, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.