@inproceedings{higy-etal-2020-textual,
title = "Textual {S}upervision for {V}isually {G}rounded {S}poken {L}anguage {U}nderstanding",
author = "Higy, Bertrand and
Elliott, Desmond and
Chrupa{\l}a, Grzegorz",
editor = "Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.findings-emnlp.244/",
doi = "10.18653/v1/2020.findings-emnlp.244",
pages = "2698--2709",
abstract = "Visually-grounded models of spoken language understanding extract semantic information directly from speech, without relying on transcriptions. This is useful for low-resource languages, where transcriptions can be expensive or impossible to obtain. Recent work showed that these models can be improved if transcriptions are available at training time. However, it is not clear how an end-to-end approach compares to a traditional pipeline-based approach when one has access to transcriptions. Comparing different strategies, we find that the pipeline approach works better when enough text is available. With low-resource languages in mind, we also show that translations can be effectively used in place of transcriptions but more data is needed to obtain similar results."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="higy-etal-2020-textual">
<titleInfo>
<title>Textual Supervision for Visually Grounded Spoken Language Understanding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Bertrand</namePart>
<namePart type="family">Higy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Desmond</namePart>
<namePart type="family">Elliott</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Grzegorz</namePart>
<namePart type="family">Chrupała</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2020</title>
</titleInfo>
<name type="personal">
<namePart type="given">Trevor</namePart>
<namePart type="family">Cohn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yulan</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Visually-grounded models of spoken language understanding extract semantic information directly from speech, without relying on transcriptions. This is useful for low-resource languages, where transcriptions can be expensive or impossible to obtain. Recent work showed that these models can be improved if transcriptions are available at training time. However, it is not clear how an end-to-end approach compares to a traditional pipeline-based approach when one has access to transcriptions. Comparing different strategies, we find that the pipeline approach works better when enough text is available. With low-resource languages in mind, we also show that translations can be effectively used in place of transcriptions but more data is needed to obtain similar results.</abstract>
<identifier type="citekey">higy-etal-2020-textual</identifier>
<identifier type="doi">10.18653/v1/2020.findings-emnlp.244</identifier>
<location>
<url>https://aclanthology.org/2020.findings-emnlp.244/</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>2698</start>
<end>2709</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Textual Supervision for Visually Grounded Spoken Language Understanding
%A Higy, Bertrand
%A Elliott, Desmond
%A Chrupała, Grzegorz
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Findings of the Association for Computational Linguistics: EMNLP 2020
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F higy-etal-2020-textual
%X Visually-grounded models of spoken language understanding extract semantic information directly from speech, without relying on transcriptions. This is useful for low-resource languages, where transcriptions can be expensive or impossible to obtain. Recent work showed that these models can be improved if transcriptions are available at training time. However, it is not clear how an end-to-end approach compares to a traditional pipeline-based approach when one has access to transcriptions. Comparing different strategies, we find that the pipeline approach works better when enough text is available. With low-resource languages in mind, we also show that translations can be effectively used in place of transcriptions but more data is needed to obtain similar results.
%R 10.18653/v1/2020.findings-emnlp.244
%U https://aclanthology.org/2020.findings-emnlp.244/
%U https://doi.org/10.18653/v1/2020.findings-emnlp.244
%P 2698-2709
Markdown (Informal)
[Textual Supervision for Visually Grounded Spoken Language Understanding](https://aclanthology.org/2020.findings-emnlp.244/) (Higy et al., Findings 2020)
ACL
Bertrand Higy, Desmond Elliott, and Grzegorz Chrupała. 2020. Textual Supervision for Visually Grounded Spoken Language Understanding. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 2698–2709, Online. Association for Computational Linguistics.