@inproceedings{zhou-etal-2021-think,
title = "Think Before You Speak: Learning to Generate Implicit Knowledge for Response Generation by Self-Talk",
author = "Zhou, Pei and
Hedayatnia, Behnam and
Gopalakrishnan, Karthik and
Kim, Seokhwan and
Pujara, Jay and
Ren, Xiang and
Liu, Yang and
Hakkani-Tur, Dilek",
editor = "Papangelis, Alexandros and
Budzianowski, Pawe{\l} and
Liu, Bing and
Nouri, Elnaz and
Rastogi, Abhinav and
Chen, Yun-Nung",
booktitle = "Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI",
month = nov,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.nlp4convai-1.23/",
doi = "10.18653/v1/2021.nlp4convai-1.23",
pages = "251--253",
abstract = "Humans make appropriate responses not only based on previous dialogue utterances but also on implicit background knowledge such as common sense. Although neural response generation models seem to produce human-like responses, they are mostly end-to-end and not generating intermediate grounds between a dialogue history and responses. This work aims to study if and how we can train an RG model that talks with itself to generate implicit knowledge before making responses. We further investigate can such models identify when to generate implicit background knowledge and when it is not necessary. Experimental results show that compared with models that directly generate responses given a dialogue history, self-talk models produce better-quality responses according to human evaluation on grammaticality, coherence, and engagingness. And models that are trained to identify when to self-talk further improves the response quality. Analysis on generated implicit knowledge shows that models mostly use the knowledge appropriately in the responses."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhou-etal-2021-think">
<titleInfo>
<title>Think Before You Speak: Learning to Generate Implicit Knowledge for Response Generation by Self-Talk</title>
</titleInfo>
<name type="personal">
<namePart type="given">Pei</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Behnam</namePart>
<namePart type="family">Hedayatnia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Karthik</namePart>
<namePart type="family">Gopalakrishnan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Seokhwan</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jay</namePart>
<namePart type="family">Pujara</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiang</namePart>
<namePart type="family">Ren</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dilek</namePart>
<namePart type="family">Hakkani-Tur</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexandros</namePart>
<namePart type="family">Papangelis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paweł</namePart>
<namePart type="family">Budzianowski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bing</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elnaz</namePart>
<namePart type="family">Nouri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abhinav</namePart>
<namePart type="family">Rastogi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>Humans make appropriate responses based not only on previous dialogue utterances but also on implicit background knowledge such as common sense. Although neural response generation (RG) models seem to produce human-like responses, they are mostly end-to-end and do not generate intermediate grounding between a dialogue history and responses. This work studies if and how we can train an RG model that talks with itself to generate implicit knowledge before producing responses. We further investigate whether such models can identify when implicit background knowledge is needed and when it is not. Experimental results show that, compared with models that directly generate responses given a dialogue history, self-talk models produce better-quality responses according to human evaluation of grammaticality, coherence, and engagingness. Models trained to identify when to self-talk further improve response quality. Analysis of the generated implicit knowledge shows that the models mostly use it appropriately in their responses.</abstract>
<identifier type="citekey">zhou-etal-2021-think</identifier>
<identifier type="doi">10.18653/v1/2021.nlp4convai-1.23</identifier>
<location>
<url>https://aclanthology.org/2021.nlp4convai-1.23/</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>251</start>
<end>253</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Think Before You Speak: Learning to Generate Implicit Knowledge for Response Generation by Self-Talk
%A Zhou, Pei
%A Hedayatnia, Behnam
%A Gopalakrishnan, Karthik
%A Kim, Seokhwan
%A Pujara, Jay
%A Ren, Xiang
%A Liu, Yang
%A Hakkani-Tur, Dilek
%Y Papangelis, Alexandros
%Y Budzianowski, Paweł
%Y Liu, Bing
%Y Nouri, Elnaz
%Y Rastogi, Abhinav
%Y Chen, Yun-Nung
%S Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online
%F zhou-etal-2021-think
%X Humans make appropriate responses based not only on previous dialogue utterances but also on implicit background knowledge such as common sense. Although neural response generation (RG) models seem to produce human-like responses, they are mostly end-to-end and do not generate intermediate grounding between a dialogue history and responses. This work studies if and how we can train an RG model that talks with itself to generate implicit knowledge before producing responses. We further investigate whether such models can identify when implicit background knowledge is needed and when it is not. Experimental results show that, compared with models that directly generate responses given a dialogue history, self-talk models produce better-quality responses according to human evaluation of grammaticality, coherence, and engagingness. Models trained to identify when to self-talk further improve response quality. Analysis of the generated implicit knowledge shows that the models mostly use it appropriately in their responses.
%R 10.18653/v1/2021.nlp4convai-1.23
%U https://aclanthology.org/2021.nlp4convai-1.23/
%U https://doi.org/10.18653/v1/2021.nlp4convai-1.23
%P 251-253
Markdown (Informal)
[Think Before You Speak: Learning to Generate Implicit Knowledge for Response Generation by Self-Talk](https://aclanthology.org/2021.nlp4convai-1.23/) (Zhou et al., NLP4ConvAI 2021)
ACL
Pei Zhou, Behnam Hedayatnia, Karthik Gopalakrishnan, Seokhwan Kim, Jay Pujara, Xiang Ren, Yang Liu, and Dilek Hakkani-Tur. 2021. [Think Before You Speak: Learning to Generate Implicit Knowledge for Response Generation by Self-Talk](https://aclanthology.org/2021.nlp4convai-1.23/). In Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI, pages 251–253, Online. Association for Computational Linguistics.