@inproceedings{madasu-srivastava-2022-large,
title = "What do Large Language Models Learn beyond Language?",
author = "Madasu, Avinash and
Srivastava, Shashank",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-emnlp.516/",
doi = "10.18653/v1/2022.findings-emnlp.516",
pages = "6940--6953",
abstract = "Large language models (LMs) have rapidly become a mainstay in Natural Language Processing. These models are known to acquire rich linguistic knowledge from training on large amounts of text. In this paper, we investigate if pre-training on text also confers these models with helpful {\textquoteleft}inductive biases' for non-linguistic reasoning. On a set of 19 diverse non-linguistic tasks involving quantitative computations, recognizing regular expressions and reasoning over strings. We find that pretrained models significantly outperform comparable non-pretrained neural models. This remains true also in experiments with training non-pretrained models with fewer parameters to account for model regularization effects. We further explore the effect of text domain on LMs by pretraining models from text from different domains and provenances. Our experiments surprisingly reveal that the positive effects of pre-training persist even when pretraining on multi-lingual text or computer code, and even for text generated from synthetic languages. Our findings suggest a hithertho unexplored deep connection between pre-training and inductive learning abilities of language models"
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="madasu-srivastava-2022-large">
<titleInfo>
<title>What do Large Language Models Learn beyond Language?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Avinash</namePart>
<namePart type="family">Madasu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shashank</namePart>
<namePart type="family">Srivastava</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2022</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yoav</namePart>
<namePart type="family">Goldberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zornitsa</namePart>
<namePart type="family">Kozareva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yue</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large language models (LMs) have rapidly become a mainstay in Natural Language Processing. These models are known to acquire rich linguistic knowledge from training on large amounts of text. In this paper, we investigate whether pre-training on text also confers helpful ‘inductive biases’ on these models for non-linguistic reasoning. On a set of 19 diverse non-linguistic tasks involving quantitative computations, recognizing regular expressions, and reasoning over strings, we find that pretrained models significantly outperform comparable non-pretrained neural models. This also remains true in experiments where non-pretrained models are trained with fewer parameters to account for model regularization effects. We further explore the effect of text domain on LMs by pretraining models on text from different domains and provenances. Our experiments surprisingly reveal that the positive effects of pre-training persist even when pretraining on multilingual text or computer code, and even on text generated from synthetic languages. Our findings suggest a hitherto unexplored deep connection between pre-training and the inductive learning abilities of language models.</abstract>
<identifier type="citekey">madasu-srivastava-2022-large</identifier>
<identifier type="doi">10.18653/v1/2022.findings-emnlp.516</identifier>
<location>
<url>https://aclanthology.org/2022.findings-emnlp.516/</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>6940</start>
<end>6953</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T What do Large Language Models Learn beyond Language?
%A Madasu, Avinash
%A Srivastava, Shashank
%Y Goldberg, Yoav
%Y Kozareva, Zornitsa
%Y Zhang, Yue
%S Findings of the Association for Computational Linguistics: EMNLP 2022
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F madasu-srivastava-2022-large
%X Large language models (LMs) have rapidly become a mainstay in Natural Language Processing. These models are known to acquire rich linguistic knowledge from training on large amounts of text. In this paper, we investigate whether pre-training on text also confers helpful ‘inductive biases’ on these models for non-linguistic reasoning. On a set of 19 diverse non-linguistic tasks involving quantitative computations, recognizing regular expressions, and reasoning over strings, we find that pretrained models significantly outperform comparable non-pretrained neural models. This also remains true in experiments where non-pretrained models are trained with fewer parameters to account for model regularization effects. We further explore the effect of text domain on LMs by pretraining models on text from different domains and provenances. Our experiments surprisingly reveal that the positive effects of pre-training persist even when pretraining on multilingual text or computer code, and even on text generated from synthetic languages. Our findings suggest a hitherto unexplored deep connection between pre-training and the inductive learning abilities of language models.
%R 10.18653/v1/2022.findings-emnlp.516
%U https://aclanthology.org/2022.findings-emnlp.516/
%U https://doi.org/10.18653/v1/2022.findings-emnlp.516
%P 6940-6953
Markdown (Informal)
[What do Large Language Models Learn beyond Language?](https://aclanthology.org/2022.findings-emnlp.516/) (Madasu & Srivastava, Findings 2022)
ACL
Avinash Madasu and Shashank Srivastava. 2022. What do Large Language Models Learn beyond Language? In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 6940–6953, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.