BibTeX
@inproceedings{mcmanus-etal-2023-ups,
title = "The Ups and Downs of Training {R}o{BERT}a-based models on Smaller Datasets for Translation Tasks from Classical {C}hinese into Modern Standard {M}andarin and {M}odern {E}nglish",
author = "McManus, Stuart Michael and
Liu, Roslin and
Li, Yuji and
Tam, Leo and
Qiu, Stephanie and
Yu, Letian",
booktitle = "Proceedings of ALT2023: Ancient Language Translation Workshop",
month = sep,
year = "2023",
address = "Macau SAR, China",
publisher = "Asia-Pacific Association for Machine Translation",
url = "https://aclanthology.org/2023.alt-1.2/",
pages = "15--22",
abstract = "The paper presents an investigation into the effectiveness of pre-trained language models, Siku-RoBERTa and RoBERTa, for Classical Chinese to Modern Standard Mandarin and Classical Chinese to English translation tasks. The English translation model resulted in unsatisfactory performance due to the small dataset, while the Modern Standard Mandarin model gave reasonable results."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="mcmanus-etal-2023-ups">
<titleInfo>
<title>The Ups and Downs of Training RoBERTa-based models on Smaller Datasets for Translation Tasks from Classical Chinese into Modern Standard Mandarin and Modern English</title>
</titleInfo>
<name type="personal">
<namePart type="given">Stuart</namePart>
<namePart type="given">Michael</namePart>
<namePart type="family">McManus</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roslin</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuji</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leo</namePart>
<namePart type="family">Tam</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stephanie</namePart>
<namePart type="family">Qiu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Letian</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of ALT2023: Ancient Language Translation Workshop</title>
</titleInfo>
<originInfo>
<publisher>Asia-Pacific Association for Machine Translation</publisher>
<place>
<placeTerm type="text">Macau SAR, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The paper presents an investigation into the effectiveness of pre-trained language models, Siku-RoBERTa and RoBERTa, for Classical Chinese to Modern Standard Mandarin and Classical Chinese to English translation tasks. The English translation model resulted in unsatisfactory performance due to the small dataset, while the Modern Standard Mandarin model gave reasonable results.</abstract>
<identifier type="citekey">mcmanus-etal-2023-ups</identifier>
<location>
<url>https://aclanthology.org/2023.alt-1.2/</url>
</location>
<part>
<date>2023-09</date>
<extent unit="page">
<start>15</start>
<end>22</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T The Ups and Downs of Training RoBERTa-based models on Smaller Datasets for Translation Tasks from Classical Chinese into Modern Standard Mandarin and Modern English
%A McManus, Stuart Michael
%A Liu, Roslin
%A Li, Yuji
%A Tam, Leo
%A Qiu, Stephanie
%A Yu, Letian
%S Proceedings of ALT2023: Ancient Language Translation Workshop
%D 2023
%8 September
%I Asia-Pacific Association for Machine Translation
%C Macau SAR, China
%F mcmanus-etal-2023-ups
%X The paper presents an investigation into the effectiveness of pre-trained language models, Siku-RoBERTa and RoBERTa, for Classical Chinese to Modern Standard Mandarin and Classical Chinese to English translation tasks. The English translation model resulted in unsatisfactory performance due to the small dataset, while the Modern Standard Mandarin model gave reasonable results.
%U https://aclanthology.org/2023.alt-1.2/
%P 15-22
Markdown (Informal)
[The Ups and Downs of Training RoBERTa-based models on Smaller Datasets for Translation Tasks from Classical Chinese into Modern Standard Mandarin and Modern English](https://aclanthology.org/2023.alt-1.2/) (McManus et al., alt 2023)
ACL
Stuart Michael McManus, Roslin Liu, Yuji Li, Leo Tam, Stephanie Qiu, and Letian Yu. 2023. The Ups and Downs of Training RoBERTa-based models on Smaller Datasets for Translation Tasks from Classical Chinese into Modern Standard Mandarin and Modern English. In Proceedings of ALT2023: Ancient Language Translation Workshop, pages 15–22, Macau SAR, China. Asia-Pacific Association for Machine Translation.