BibTeX
@inproceedings{bollmann-etal-2018-multi,
title = "Multi-task learning for historical text normalization: Size matters",
author = "Bollmann, Marcel and
S{\o}gaard, Anders and
Bingel, Joachim",
editor = "Haffari, Reza and
Cherry, Colin and
Foster, George and
Khadivi, Shahram and
Salehi, Bahar",
booktitle = "Proceedings of the Workshop on Deep Learning Approaches for Low-Resource {NLP}",
month = jul,
year = "2018",
address = "Melbourne",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-3403",
doi = "10.18653/v1/W18-3403",
pages = "19--24",
abstract = "Historical text normalization suffers from small datasets that exhibit high variance, and previous work has shown that multi-task learning can be used to leverage data from related problems in order to obtain more robust models. Previous work has been limited to datasets from a specific language and a specific historical period, and it is not clear whether results generalize. It therefore remains an open problem, when historical text normalization benefits from multi-task learning. We explore the benefits of multi-task learning across 10 different datasets, representing different languages and periods. Our main finding{---}contrary to what has been observed for other NLP tasks{---}is that multi-task learning mainly works when target task data is very scarce.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="bollmann-etal-2018-multi">
    <titleInfo>
      <title>Multi-task learning for historical text normalization: Size matters</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Marcel</namePart>
      <namePart type="family">Bollmann</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anders</namePart>
      <namePart type="family">Søgaard</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Joachim</namePart>
      <namePart type="family">Bingel</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Workshop on Deep Learning Approaches for Low-Resource NLP</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Reza</namePart>
        <namePart type="family">Haffari</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Colin</namePart>
        <namePart type="family">Cherry</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">George</namePart>
        <namePart type="family">Foster</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Shahram</namePart>
        <namePart type="family">Khadivi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Bahar</namePart>
        <namePart type="family">Salehi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Melbourne</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Historical text normalization suffers from small datasets that exhibit high variance, and previous work has shown that multi-task learning can be used to leverage data from related problems in order to obtain more robust models. Previous work has been limited to datasets from a specific language and a specific historical period, and it is not clear whether results generalize. It therefore remains an open problem, when historical text normalization benefits from multi-task learning. We explore the benefits of multi-task learning across 10 different datasets, representing different languages and periods. Our main finding—contrary to what has been observed for other NLP tasks—is that multi-task learning mainly works when target task data is very scarce.</abstract>
    <identifier type="citekey">bollmann-etal-2018-multi</identifier>
    <identifier type="doi">10.18653/v1/W18-3403</identifier>
    <location>
      <url>https://aclanthology.org/W18-3403</url>
    </location>
    <part>
      <date>2018-07</date>
      <extent unit="page">
        <start>19</start>
        <end>24</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Multi-task learning for historical text normalization: Size matters
%A Bollmann, Marcel
%A Søgaard, Anders
%A Bingel, Joachim
%Y Haffari, Reza
%Y Cherry, Colin
%Y Foster, George
%Y Khadivi, Shahram
%Y Salehi, Bahar
%S Proceedings of the Workshop on Deep Learning Approaches for Low-Resource NLP
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne
%F bollmann-etal-2018-multi
%X Historical text normalization suffers from small datasets that exhibit high variance, and previous work has shown that multi-task learning can be used to leverage data from related problems in order to obtain more robust models. Previous work has been limited to datasets from a specific language and a specific historical period, and it is not clear whether results generalize. It therefore remains an open problem, when historical text normalization benefits from multi-task learning. We explore the benefits of multi-task learning across 10 different datasets, representing different languages and periods. Our main finding—contrary to what has been observed for other NLP tasks—is that multi-task learning mainly works when target task data is very scarce.
%R 10.18653/v1/W18-3403
%U https://aclanthology.org/W18-3403
%U https://doi.org/10.18653/v1/W18-3403
%P 19-24
Markdown (Informal)
[Multi-task learning for historical text normalization: Size matters](https://aclanthology.org/W18-3403) (Bollmann et al., ACL 2018)
ACL
Marcel Bollmann, Anders Søgaard, and Joachim Bingel. 2018. Multi-task learning for historical text normalization: Size matters. In Proceedings of the Workshop on Deep Learning Approaches for Low-Resource NLP, pages 19–24, Melbourne. Association for Computational Linguistics.