@inproceedings{gaspers-etal-2021-impact,
title = "The impact of domain-specific representations on {BERT}-based multi-domain spoken language understanding",
author = {Gaspers, Judith and
Do, Quynh and
R{\"o}ding, Tobias and
Bradford, Melanie},
editor = "Ben-David, Eyal and
Cohen, Shay and
McDonald, Ryan and
Plank, Barbara and
Reichart, Roi and
Rotman, Guy and
Ziser, Yftah",
booktitle = "Proceedings of the Second Workshop on Domain Adaptation for NLP",
month = apr,
year = "2021",
address = "Kyiv, Ukraine",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.adaptnlp-1.4/",
pages = "28--32",
abstract = "This paper provides the first experimental study on the impact of using domain-specific representations on a BERT-based multi-task spoken language understanding (SLU) model for multi-domain applications. Our results on a real-world dataset covering three languages indicate that by using domain-specific representations learned adversarially, model performance can be improved across all of the three SLU subtasks domain classification, intent classification and slot filling. Gains are particularly large for domains with limited training data."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gaspers-etal-2021-impact">
<titleInfo>
<title>The impact of domain-specific representations on BERT-based multi-domain spoken language understanding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Judith</namePart>
<namePart type="family">Gaspers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Quynh</namePart>
<namePart type="family">Do</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tobias</namePart>
<namePart type="family">Röding</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Melanie</namePart>
<namePart type="family">Bradford</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Domain Adaptation for NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Eyal</namePart>
<namePart type="family">Ben-David</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shay</namePart>
<namePart type="family">Cohen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ryan</namePart>
<namePart type="family">McDonald</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Barbara</namePart>
<namePart type="family">Plank</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roi</namePart>
<namePart type="family">Reichart</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guy</namePart>
<namePart type="family">Rotman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yftah</namePart>
<namePart type="family">Ziser</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Kyiv, Ukraine</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper provides the first experimental study on the impact of using domain-specific representations on a BERT-based multi-task spoken language understanding (SLU) model for multi-domain applications. Our results on a real-world dataset covering three languages indicate that by using domain-specific representations learned adversarially, model performance can be improved across all three SLU subtasks: domain classification, intent classification, and slot filling. Gains are particularly large for domains with limited training data.</abstract>
<identifier type="citekey">gaspers-etal-2021-impact</identifier>
<location>
<url>https://aclanthology.org/2021.adaptnlp-1.4/</url>
</location>
<part>
<date>2021-04</date>
<extent unit="page">
<start>28</start>
<end>32</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The impact of domain-specific representations on BERT-based multi-domain spoken language understanding
%A Gaspers, Judith
%A Do, Quynh
%A Röding, Tobias
%A Bradford, Melanie
%Y Ben-David, Eyal
%Y Cohen, Shay
%Y McDonald, Ryan
%Y Plank, Barbara
%Y Reichart, Roi
%Y Rotman, Guy
%Y Ziser, Yftah
%S Proceedings of the Second Workshop on Domain Adaptation for NLP
%D 2021
%8 April
%I Association for Computational Linguistics
%C Kyiv, Ukraine
%F gaspers-etal-2021-impact
%X This paper provides the first experimental study on the impact of using domain-specific representations on a BERT-based multi-task spoken language understanding (SLU) model for multi-domain applications. Our results on a real-world dataset covering three languages indicate that by using domain-specific representations learned adversarially, model performance can be improved across all three SLU subtasks: domain classification, intent classification, and slot filling. Gains are particularly large for domains with limited training data.
%U https://aclanthology.org/2021.adaptnlp-1.4/
%P 28-32
Markdown (Informal)
[The impact of domain-specific representations on BERT-based multi-domain spoken language understanding](https://aclanthology.org/2021.adaptnlp-1.4/) (Gaspers et al., AdaptNLP 2021)
ACL
Judith Gaspers, Quynh Do, Tobias Röding, and Melanie Bradford. 2021. The impact of domain-specific representations on BERT-based multi-domain spoken language understanding. In Proceedings of the Second Workshop on Domain Adaptation for NLP, pages 28–32, Kyiv, Ukraine. Association for Computational Linguistics.
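
The abstract above describes a BERT-based multi-task SLU model whose domain-specific representations are learned adversarially. The Python sketch below is only a rough, hypothetical rendering of such a setup (a shared BERT encoder with domain, intent, and slot-filling heads plus a gradient-reversal domain branch); every class name, dimension, and hyperparameter is an assumption for illustration and is not taken from the paper or its code.

# Illustrative sketch only; not the authors' implementation.
import torch
import torch.nn as nn
from transformers import BertModel


class GradientReversal(torch.autograd.Function):
    """Identity on the forward pass; negated, scaled gradient on the backward pass."""

    @staticmethod
    def forward(ctx, x, lambd):
        ctx.lambd = lambd
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -ctx.lambd * grad_output, None


class MultiDomainSLU(nn.Module):
    """Shared BERT encoder with the three SLU heads named in the abstract,
    plus an adversarial domain branch (all sizes and names are assumptions)."""

    def __init__(self, num_domains, num_intents, num_slot_labels,
                 model_name="bert-base-multilingual-cased", adv_lambda=0.1):
        super().__init__()
        self.bert = BertModel.from_pretrained(model_name)
        hidden = self.bert.config.hidden_size
        self.adv_lambda = adv_lambda
        # Multi-task heads sharing the encoder.
        self.domain_head = nn.Linear(hidden, num_domains)
        self.intent_head = nn.Linear(hidden, num_intents)
        self.slot_head = nn.Linear(hidden, num_slot_labels)
        # Domain discriminator trained through the gradient-reversal layer.
        self.adv_domain_head = nn.Linear(hidden, num_domains)

    def forward(self, input_ids, attention_mask):
        out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        pooled = out.pooler_output        # [batch, hidden] sentence representation
        tokens = out.last_hidden_state    # [batch, seq_len, hidden] token representations
        reversed_pooled = GradientReversal.apply(pooled, self.adv_lambda)
        return {
            "domain_logits": self.domain_head(pooled),
            "intent_logits": self.intent_head(pooled),
            "slot_logits": self.slot_head(tokens),
            "adv_domain_logits": self.adv_domain_head(reversed_pooled),
        }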