@inproceedings{sreedhar-etal-2020-learning,
title = "Learning Improvised Chatbots from Adversarial Modifications of Natural Language Feedback",
author = "Sreedhar, Makesh Narsimhan and
Ni, Kun and
Reddy, Siva",
editor = "Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.findings-emnlp.221/",
doi = "10.18653/v1/2020.findings-emnlp.221",
pages = "2445--2453",
abstract = "The ubiquitous nature of dialogue systems and their interaction with users generate an enormous amount of data. Can we improve chatbots using this data? A self-feeding chatbot improves itself by asking natural language feedback when a user is dissatisfied with its response and uses this feedback as an additional training sample. However, user feedback in most cases contains extraneous sequences hindering their usefulness as a training sample. In this work, we propose a generative adversarial model that converts noisy feedback into a plausible natural response in a conversation. The generator's goal is to convert the feedback into a response that answers the user's previous utterance and to fool the discriminator which distinguishes feedback from natural responses. We show that augmenting original training data with these modified feedback responses improves the original chatbot performance from 69.94{\%} to 75.96{\%} in ranking correct responses on the PERSONACHAT dataset, a large improvement given that the original model is already trained on 131k samples."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sreedhar-etal-2020-learning">
<titleInfo>
<title>Learning Improvised Chatbots from Adversarial Modifications of Natural Language Feedback</title>
</titleInfo>
<name type="personal">
<namePart type="given">Makesh</namePart>
<namePart type="given">Narsimhan</namePart>
<namePart type="family">Sreedhar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kun</namePart>
<namePart type="family">Ni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Siva</namePart>
<namePart type="family">Reddy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2020</title>
</titleInfo>
<name type="personal">
<namePart type="given">Trevor</namePart>
<namePart type="family">Cohn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yulan</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The ubiquitous nature of dialogue systems and their interaction with users generate an enormous amount of data. Can we improve chatbots using this data? A self-feeding chatbot improves itself by asking natural language feedback when a user is dissatisfied with its response and uses this feedback as an additional training sample. However, user feedback in most cases contains extraneous sequences hindering their usefulness as a training sample. In this work, we propose a generative adversarial model that converts noisy feedback into a plausible natural response in a conversation. The generator’s goal is to convert the feedback into a response that answers the user’s previous utterance and to fool the discriminator which distinguishes feedback from natural responses. We show that augmenting original training data with these modified feedback responses improves the original chatbot performance from 69.94% to 75.96% in ranking correct responses on the PERSONACHAT dataset, a large improvement given that the original model is already trained on 131k samples.</abstract>
<identifier type="citekey">sreedhar-etal-2020-learning</identifier>
<identifier type="doi">10.18653/v1/2020.findings-emnlp.221</identifier>
<location>
<url>https://aclanthology.org/2020.findings-emnlp.221/</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>2445</start>
<end>2453</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Learning Improvised Chatbots from Adversarial Modifications of Natural Language Feedback
%A Sreedhar, Makesh Narsimhan
%A Ni, Kun
%A Reddy, Siva
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Findings of the Association for Computational Linguistics: EMNLP 2020
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F sreedhar-etal-2020-learning
%X The ubiquitous nature of dialogue systems and their interaction with users generate an enormous amount of data. Can we improve chatbots using this data? A self-feeding chatbot improves itself by asking natural language feedback when a user is dissatisfied with its response and uses this feedback as an additional training sample. However, user feedback in most cases contains extraneous sequences hindering their usefulness as a training sample. In this work, we propose a generative adversarial model that converts noisy feedback into a plausible natural response in a conversation. The generator’s goal is to convert the feedback into a response that answers the user’s previous utterance and to fool the discriminator which distinguishes feedback from natural responses. We show that augmenting original training data with these modified feedback responses improves the original chatbot performance from 69.94% to 75.96% in ranking correct responses on the PERSONACHAT dataset, a large improvement given that the original model is already trained on 131k samples.
%R 10.18653/v1/2020.findings-emnlp.221
%U https://aclanthology.org/2020.findings-emnlp.221/
%U https://doi.org/10.18653/v1/2020.findings-emnlp.221
%P 2445-2453
Markdown (Informal)
[Learning Improvised Chatbots from Adversarial Modifications of Natural Language Feedback](https://aclanthology.org/2020.findings-emnlp.221/) (Sreedhar et al., Findings 2020)
ACL