@article{paperno-2022-learning,
title = "On Learning Interpreted Languages with Recurrent Models",
author = "Paperno, Denis",
journal = "Computational Linguistics",
volume = "48",
number = "2",
month = jun,
year = "2022",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/2022.cl-2.7/",
doi = "10.1162/coli_a_00431",
pages = "471--482",
abstract = "Can recurrent neural nets, inspired by human sequential data processing, learn to understand language? We construct simplified data sets reflecting core properties of natural language as modeled in formal syntax and semantics: recursive syntactic structure and compositionality. We find LSTM and GRU networks to generalize to compositional interpretation well, but only in the most favorable learning settings, with a well-paced curriculum, extensive training data, and left-to-right (but not right-to-left) composition."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="paperno-2022-learning">
  <titleInfo>
    <title>On Learning Interpreted Languages with Recurrent Models</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Denis</namePart>
    <namePart type="family">Paperno</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2022-06</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <genre authority="bibutilsgt">journal article</genre>
  <relatedItem type="host">
    <titleInfo>
      <title>Computational Linguistics</title>
    </titleInfo>
    <originInfo>
      <issuance>continuing</issuance>
      <publisher>MIT Press</publisher>
      <place>
        <placeTerm type="text">Cambridge, MA</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">periodical</genre>
    <genre authority="bibutilsgt">academic journal</genre>
  </relatedItem>
  <abstract>Can recurrent neural nets, inspired by human sequential data processing, learn to understand language? We construct simplified data sets reflecting core properties of natural language as modeled in formal syntax and semantics: recursive syntactic structure and compositionality. We find LSTM and GRU networks to generalize to compositional interpretation well, but only in the most favorable learning settings, with a well-paced curriculum, extensive training data, and left-to-right (but not right-to-left) composition.</abstract>
  <identifier type="citekey">paperno-2022-learning</identifier>
  <identifier type="doi">10.1162/coli_a_00431</identifier>
  <location>
    <url>https://aclanthology.org/2022.cl-2.7/</url>
  </location>
  <part>
    <date>2022-06</date>
    <detail type="volume"><number>48</number></detail>
    <detail type="issue"><number>2</number></detail>
    <extent unit="page">
      <start>471</start>
      <end>482</end>
    </extent>
  </part>
</mods>
</modsCollection>

%0 Journal Article
%T On Learning Interpreted Languages with Recurrent Models
%A Paperno, Denis
%J Computational Linguistics
%D 2022
%8 June
%V 48
%N 2
%I MIT Press
%C Cambridge, MA
%F paperno-2022-learning
%X Can recurrent neural nets, inspired by human sequential data processing, learn to understand language? We construct simplified data sets reflecting core properties of natural language as modeled in formal syntax and semantics: recursive syntactic structure and compositionality. We find LSTM and GRU networks to generalize to compositional interpretation well, but only in the most favorable learning settings, with a well-paced curriculum, extensive training data, and left-to-right (but not right-to-left) composition.
%R 10.1162/coli_a_00431
%U https://aclanthology.org/2022.cl-2.7/
%U https://doi.org/10.1162/coli_a_00431
%P 471-482

Markdown (Informal)
[On Learning Interpreted Languages with Recurrent Models](https://aclanthology.org/2022.cl-2.7/) (Paperno, CL 2022)

ACL
Denis Paperno. 2022. On Learning Interpreted Languages with Recurrent Models. Computational Linguistics, 48(2):471–482.