@inproceedings{lin-chen-2021-exploiting,
title = "Exploiting Low-Resource Code-Switching Data to {M}andarin-{E}nglish Speech Recognition Systems",
author = "Lin, Hou-An and
Chen, Chia-Ping",
editor = "Lee, Lung-Hao and
Chang, Chia-Hui and
Chen, Kuan-Yu",
booktitle = "Proceedings of the 33rd Conference on Computational Linguistics and Speech Processing (ROCLING 2021)",
month = oct,
year = "2021",
address = "Taoyuan, Taiwan",
publisher = "The Association for Computational Linguistics and Chinese Language Processing (ACLCLP)",
url = "https://aclanthology.org/2021.rocling-1.11",
pages = "81--86",
abstract = "In this paper, we investigate how to use limited code-switching data to implement a code-switching speech recognition system. We utilize the Transformer end-to-end model to develop our code switching speech recognition system, which is trained with the Mandarin dataset and a small amount of Mandarin-English code switching dataset, as the baseline of this paper. Next, we compare the performance of systems after adding multi-task learning and transfer learning. Character Error Rate (CER) is adopted as the criterion for the system. Finally, we combined the three systems with the language model, respectively, our best result dropped to 23.9{\%} compared with the baseline of 28.7{\%}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lin-chen-2021-exploiting">
<titleInfo>
<title>Exploiting Low-Resource Code-Switching Data to Mandarin-English Speech Recognition Systems</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hou-An</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chia-Ping</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 33rd Conference on Computational Linguistics and Speech Processing (ROCLING 2021)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lung-Hao</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chia-Hui</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kuan-Yu</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>The Association for Computational Linguistics and Chinese Language Processing (ACLCLP)</publisher>
<place>
<placeTerm type="text">Taoyuan, Taiwan</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper, we investigate how to use limited code-switching data to implement a code-switching speech recognition system. We utilize the Transformer end-to-end model to develop our code switching speech recognition system, which is trained with the Mandarin dataset and a small amount of Mandarin-English code switching dataset, as the baseline of this paper. Next, we compare the performance of systems after adding multi-task learning and transfer learning. Character Error Rate (CER) is adopted as the criterion for the system. Finally, we combined the three systems with the language model, respectively, our best result dropped to 23.9% compared with the baseline of 28.7%.</abstract>
<identifier type="citekey">lin-chen-2021-exploiting</identifier>
<location>
<url>https://aclanthology.org/2021.rocling-1.11</url>
</location>
<part>
<date>2021-10</date>
<extent unit="page">
<start>81</start>
<end>86</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Exploiting Low-Resource Code-Switching Data to Mandarin-English Speech Recognition Systems
%A Lin, Hou-An
%A Chen, Chia-Ping
%Y Lee, Lung-Hao
%Y Chang, Chia-Hui
%Y Chen, Kuan-Yu
%S Proceedings of the 33rd Conference on Computational Linguistics and Speech Processing (ROCLING 2021)
%D 2021
%8 October
%I The Association for Computational Linguistics and Chinese Language Processing (ACLCLP)
%C Taoyuan, Taiwan
%F lin-chen-2021-exploiting
%X In this paper, we investigate how to use limited code-switching data to implement a code-switching speech recognition system. We utilize the Transformer end-to-end model to develop our code switching speech recognition system, which is trained with the Mandarin dataset and a small amount of Mandarin-English code switching dataset, as the baseline of this paper. Next, we compare the performance of systems after adding multi-task learning and transfer learning. Character Error Rate (CER) is adopted as the criterion for the system. Finally, we combined the three systems with the language model, respectively, our best result dropped to 23.9% compared with the baseline of 28.7%.
%U https://aclanthology.org/2021.rocling-1.11
%P 81-86
Markdown (Informal)
[Exploiting Low-Resource Code-Switching Data to Mandarin-English Speech Recognition Systems](https://aclanthology.org/2021.rocling-1.11) (Lin & Chen, ROCLING 2021)
ACL