@article{nortje-etal-2024-visually,
title = "Visually Grounded Speech Models Have a Mutual Exclusivity Bias",
author = "Nortje, Leanne and
Onea{\c{t}}{\u{a}}, Dan and
Matusevych, Yevgen and
Kamper, Herman",
journal = "Transactions of the Association for Computational Linguistics",
volume = "12",
year = "2024",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/2024.tacl-1.42/",
doi = "10.1162/tacl_a_00672",
pages = "755--770",
abstract = "When children learn new words, they employ constraints such as the mutual exclusivity (ME) bias: A novel word is mapped to a novel object rather than a familiar one. This bias has been studied computationally, but only in models that use discrete word representations as input, ignoring the high variability of spoken words. We investigate the ME bias in the context of visually grounded speech models that learn from natural images and continuous speech audio. Concretely, we train a model on familiar words and test its ME bias by asking it to select between a novel and a familiar object when queried with a novel word. To simulate prior acoustic and visual knowledge, we experiment with several initialization strategies using pretrained speech and vision networks. Our findings reveal the ME bias across the different initialization approaches, with a stronger bias in models with more prior (in particular, visual) knowledge. Additional tests confirm the robustness of our results, even when different loss functions are considered. Based on detailed analyses to piece out the model`s representation space, we attribute the ME bias to how familiar and novel classes are distinctly separated in the resulting space."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nortje-etal-2024-visually">
<titleInfo>
<title>Visually Grounded Speech Models Have a Mutual Exclusivity Bias</title>
</titleInfo>
<name type="personal">
<namePart type="given">Leanne</namePart>
<namePart type="family">Nortje</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dan</namePart>
<namePart type="family">Oneaţă</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yevgen</namePart>
<namePart type="family">Matusevych</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Herman</namePart>
<namePart type="family">Kamper</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Transactions of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
<abstract>When children learn new words, they employ constraints such as the mutual exclusivity (ME) bias: A novel word is mapped to a novel object rather than a familiar one. This bias has been studied computationally, but only in models that use discrete word representations as input, ignoring the high variability of spoken words. We investigate the ME bias in the context of visually grounded speech models that learn from natural images and continuous speech audio. Concretely, we train a model on familiar words and test its ME bias by asking it to select between a novel and a familiar object when queried with a novel word. To simulate prior acoustic and visual knowledge, we experiment with several initialization strategies using pretrained speech and vision networks. Our findings reveal the ME bias across the different initialization approaches, with a stronger bias in models with more prior (in particular, visual) knowledge. Additional tests confirm the robustness of our results, even when different loss functions are considered. Based on detailed analyses to piece out the model's representation space, we attribute the ME bias to how familiar and novel classes are distinctly separated in the resulting space.</abstract>
<identifier type="citekey">nortje-etal-2024-visually</identifier>
<identifier type="doi">10.1162/tacl_a_00672</identifier>
<location>
<url>https://aclanthology.org/2024.tacl-1.42/</url>
</location>
<part>
<date>2024</date>
<detail type="volume"><number>12</number></detail>
<extent unit="page">
<start>755</start>
<end>770</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Journal Article
%T Visually Grounded Speech Models Have a Mutual Exclusivity Bias
%A Nortje, Leanne
%A Oneaţă, Dan
%A Matusevych, Yevgen
%A Kamper, Herman
%J Transactions of the Association for Computational Linguistics
%D 2024
%V 12
%I MIT Press
%C Cambridge, MA
%F nortje-etal-2024-visually
%X When children learn new words, they employ constraints such as the mutual exclusivity (ME) bias: A novel word is mapped to a novel object rather than a familiar one. This bias has been studied computationally, but only in models that use discrete word representations as input, ignoring the high variability of spoken words. We investigate the ME bias in the context of visually grounded speech models that learn from natural images and continuous speech audio. Concretely, we train a model on familiar words and test its ME bias by asking it to select between a novel and a familiar object when queried with a novel word. To simulate prior acoustic and visual knowledge, we experiment with several initialization strategies using pretrained speech and vision networks. Our findings reveal the ME bias across the different initialization approaches, with a stronger bias in models with more prior (in particular, visual) knowledge. Additional tests confirm the robustness of our results, even when different loss functions are considered. Based on detailed analyses to piece out the model's representation space, we attribute the ME bias to how familiar and novel classes are distinctly separated in the resulting space.
%R 10.1162/tacl_a_00672
%U https://aclanthology.org/2024.tacl-1.42/
%U https://doi.org/10.1162/tacl_a_00672
%P 755-770
Markdown (Informal)
[Visually Grounded Speech Models Have a Mutual Exclusivity Bias](https://aclanthology.org/2024.tacl-1.42/) (Nortje et al., TACL 2024)
ACL
Leanne Nortje, Dan Oneaţă, Yevgen Matusevych, and Herman Kamper. 2024. Visually Grounded Speech Models Have a Mutual Exclusivity Bias. Transactions of the Association for Computational Linguistics, 12:755–770.