@inproceedings{yang-etal-2020-improving-text,
    title = "Improving Text-to-Text Pre-trained Models for the Graph-to-Text Task",
    author = "Yang, Zixiaofan and
      Einolghozati, Arash and
      Inan, Hakan and
      Diedrick, Keith and
      Fan, Angela and
      Donmez, Pinar and
      Gupta, Sonal",
    editor = "Castro Ferreira, Thiago and
      Gardent, Claire and
      Ilinykh, Nikolai and
      van der Lee, Chris and
      Mille, Simon and
      Moussallem, Diego and
      Shimorina, Anastasia",
    booktitle = "Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+)",
    month = "12",
    year = "2020",
    address = "Dublin, Ireland (Virtual)",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.webnlg-1.11",
    pages = "107--116",
    abstract = "Converting a knowledge graph or sub-graph to natural text is useful when answering questions based on a knowledge base. High-capacity language models pre-trained on large-scale text corpora have recently been shown to be powerful when fine-tuned for the knowledge-graph-to-text (KG-to-text) task. In this paper, we propose two classes of methods to improve such pre-trained models for this task. First, we improve the structure awareness of the model by organizing the input as well as learning optimal ordering via multitask learning. Second, we bridge the domain gap between text-to-text and KG-to-text tasks via a second-phase KG-to-text pre-training on similar datasets and extra lexicalization supervision to make the input more similar to natural text. We demonstrate the efficacy of our methods on the popular WebNLG dataset. Our best model achieves an almost 3 point BLEU improvement on a strong baseline while lowering the relative slot-error-rate by around 35{\%}. We also validate our results via human evaluation.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="yang-etal-2020-improving-text">
    <titleInfo>
      <title>Improving Text-to-Text Pre-trained Models for the Graph-to-Text Task</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Zixiaofan</namePart>
      <namePart type="family">Yang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Arash</namePart>
      <namePart type="family">Einolghozati</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hakan</namePart>
      <namePart type="family">Inan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Keith</namePart>
      <namePart type="family">Diedrick</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Angela</namePart>
      <namePart type="family">Fan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Pinar</namePart>
      <namePart type="family">Donmez</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sonal</namePart>
      <namePart type="family">Gupta</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Thiago</namePart>
        <namePart type="family">Castro Ferreira</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Claire</namePart>
        <namePart type="family">Gardent</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nikolai</namePart>
        <namePart type="family">Ilinykh</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Chris</namePart>
        <namePart type="family">van der Lee</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Simon</namePart>
        <namePart type="family">Mille</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Diego</namePart>
        <namePart type="family">Moussallem</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Anastasia</namePart>
        <namePart type="family">Shimorina</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Dublin, Ireland (Virtual)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Converting a knowledge graph or sub-graph to natural text is useful when answering questions based on a knowledge base. High-capacity language models pre-trained on large-scale text corpora have recently been shown to be powerful when fine-tuned for the knowledge-graph-to-text (KG-to-text) task. In this paper, we propose two classes of methods to improve such pre-trained models for this task. First, we improve the structure awareness of the model by organizing the input as well as learning optimal ordering via multitask learning. Second, we bridge the domain gap between text-to-text and KG-to-text tasks via a second-phase KG-to-text pre-training on similar datasets and extra lexicalization supervision to make the input more similar to natural text. We demonstrate the efficacy of our methods on the popular WebNLG dataset. Our best model achieves an almost 3 point BLEU improvement on a strong baseline while lowering the relative slot-error-rate by around 35%. We also validate our results via human evaluation.</abstract>
    <identifier type="citekey">yang-etal-2020-improving-text</identifier>
    <location>
      <url>https://aclanthology.org/2020.webnlg-1.11</url>
    </location>
    <part>
      <date>2020-12</date>
      <extent unit="page">
        <start>107</start>
        <end>116</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Improving Text-to-Text Pre-trained Models for the Graph-to-Text Task
%A Yang, Zixiaofan
%A Einolghozati, Arash
%A Inan, Hakan
%A Diedrick, Keith
%A Fan, Angela
%A Donmez, Pinar
%A Gupta, Sonal
%Y Castro Ferreira, Thiago
%Y Gardent, Claire
%Y Ilinykh, Nikolai
%Y van der Lee, Chris
%Y Mille, Simon
%Y Moussallem, Diego
%Y Shimorina, Anastasia
%S Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+)
%D 2020
%8 December
%I Association for Computational Linguistics
%C Dublin, Ireland (Virtual)
%F yang-etal-2020-improving-text
%X Converting a knowledge graph or sub-graph to natural text is useful when answering questions based on a knowledge base. High-capacity language models pre-trained on large-scale text corpora have recently been shown to be powerful when fine-tuned for the knowledge-graph-to-text (KG-to-text) task. In this paper, we propose two classes of methods to improve such pre-trained models for this task. First, we improve the structure awareness of the model by organizing the input as well as learning optimal ordering via multitask learning. Second, we bridge the domain gap between text-to-text and KG-to-text tasks via a second-phase KG-to-text pre-training on similar datasets and extra lexicalization supervision to make the input more similar to natural text. We demonstrate the efficacy of our methods on the popular WebNLG dataset. Our best model achieves an almost 3 point BLEU improvement on a strong baseline while lowering the relative slot-error-rate by around 35%. We also validate our results via human evaluation.
%U https://aclanthology.org/2020.webnlg-1.11
%P 107-116
Markdown (Informal):
[Improving Text-to-Text Pre-trained Models for the Graph-to-Text Task](https://aclanthology.org/2020.webnlg-1.11) (Yang et al., WebNLG 2020)

ACL:
Zixiaofan Yang, Arash Einolghozati, Hakan Inan, Keith Diedrick, Angela Fan, Pinar Donmez, and Sonal Gupta. 2020. Improving Text-to-Text Pre-trained Models for the Graph-to-Text Task. In Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+), pages 107–116, Dublin, Ireland (Virtual). Association for Computational Linguistics.