@inproceedings{chen-etal-2022-using,
title = "Using Deep Mixture-of-Experts to Detect Word Meaning Shift for {T}empo{W}i{C}",
author = "Chen, Ze and
Wang, Kangxu and
Cai, Zijian and
Zheng, Jiewen and
He, Jiarong and
Gao, Max and
Zhang, Jason",
editor = "Barbieri, Francesco and
Camacho-Collados, Jose and
Dhingra, Bhuwan and
Espinosa-Anke, Luis and
Gribovskaya, Elena and
Lazaridou, Angeliki and
Loureiro, Daniel and
Neves, Leonardo",
booktitle = "Proceedings of the First Workshop on Ever Evolving NLP (EvoNLP)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.evonlp-1.2/",
doi = "10.18653/v1/2022.evonlp-1.2",
pages = "7--11",
abstract = "This paper mainly describes the dma submission to the TempoWiC task, which achieves a macro-F1 score of 77.05{\%} and attains the first place in this task. We first explore the impact of different pre-trained language models. Then we adopt data cleaning, data augmentation, and adversarial training strategies to enhance the model generalization and robustness. For further improvement, we integrate POS information and word semantic representation using a Mixture-of-Experts (MoE) approach. The experimental results show that MoE can overcome the feature overuse issue and combine the context, POS, and word semantic features well. Additionally, we use a model ensemble method for the final prediction, which has been proven effective by many research works."
}
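
The MoE idea in the abstract, gating between context, POS, and word-semantic features so that no single feature is overused, can be illustrated with a short PyTorch sketch. This is a hedged reconstruction, not the authors' code: the one-expert-per-feature layout, linear experts, linear gate, and hidden size are all assumptions.

import torch
import torch.nn as nn

class FeatureMoE(nn.Module):
    """Illustrative gate over three feature vectors of a shared hidden size."""

    def __init__(self, hidden_size: int):
        super().__init__()
        # One expert per feature type: context, POS, word semantics.
        self.experts = nn.ModuleList(
            [nn.Linear(hidden_size, hidden_size) for _ in range(3)]
        )
        # The gate scores the experts from the concatenated features.
        self.gate = nn.Linear(hidden_size * 3, 3)

    def forward(self, context, pos, word_sem):
        feats = [context, pos, word_sem]            # each: (batch, hidden)
        weights = torch.softmax(self.gate(torch.cat(feats, dim=-1)), dim=-1)
        expert_out = torch.stack(
            [expert(f) for expert, f in zip(self.experts, feats)], dim=1
        )                                           # (batch, 3, hidden)
        # Weighted sum over experts lets the gate balance the features
        # instead of letting one dominate.
        return (weights.unsqueeze(-1) * expert_out).sum(dim=1)

A 768-dimensional hidden size would match the base-size pre-trained encoders the abstract alludes to; the gated output would then feed a binary classifier head for the same-meaning/shifted-meaning decision.
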
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chen-etal-2022-using">
<titleInfo>
<title>Using Deep Mixture-of-Experts to Detect Word Meaning Shift for TempoWiC</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ze</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kangxu</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zijian</namePart>
<namePart type="family">Cai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiewen</namePart>
<namePart type="family">Zheng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiarong</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Max</namePart>
<namePart type="family">Gao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jason</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Ever Evolving NLP (EvoNLP)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Francesco</namePart>
<namePart type="family">Barbieri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jose</namePart>
<namePart type="family">Camacho-Collados</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bhuwan</namePart>
<namePart type="family">Dhingra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Luis</namePart>
<namePart type="family">Espinosa-Anke</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elena</namePart>
<namePart type="family">Gribovskaya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Angeliki</namePart>
<namePart type="family">Lazaridou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Loureiro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leonardo</namePart>
<namePart type="family">Neves</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes the dma submission to the TempoWiC task, which achieves a macro-F1 score of 77.05% and ranks first in the task. We first explore the impact of different pre-trained language models. We then adopt data cleaning, data augmentation, and adversarial training strategies to improve model generalization and robustness. For further improvement, we integrate POS information and word semantic representations using a Mixture-of-Experts (MoE) approach. Experimental results show that the MoE approach overcomes the feature-overuse issue and combines context, POS, and word semantic features well. Additionally, we use a model ensemble for the final prediction, a method that prior work has shown to be effective.</abstract>
<identifier type="citekey">chen-etal-2022-using</identifier>
<identifier type="doi">10.18653/v1/2022.evonlp-1.2</identifier>
<location>
<url>https://aclanthology.org/2022.evonlp-1.2/</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>7</start>
<end>11</end>
</extent>
</part>
</mods>
</modsCollection>
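
The abstract also cites adversarial training as a robustness strategy. A common realization in competition systems of this kind is FGM-style perturbation of the input embeddings; the sketch below is only an assumption about what was used, and it presumes a HuggingFace-style model exposing get_input_embeddings() plus an epsilon of 1.0, neither of which the record confirms.

import torch

def fgm_training_step(model, compute_loss, batch, optimizer, epsilon=1.0):
    # Clean forward/backward pass.
    loss = compute_loss(model, batch)
    loss.backward()
    # Perturb the shared embedding matrix along its gradient direction.
    emb = model.get_input_embeddings().weight
    grad = emb.grad.detach()
    norm = torch.norm(grad)
    if norm != 0 and not torch.isnan(norm):
        delta = epsilon * grad / norm
        emb.data.add_(delta)
        adv_loss = compute_loss(model, batch)  # loss under perturbation
        adv_loss.backward()                    # accumulate adversarial grads
        emb.data.sub_(delta)                   # restore the embeddings
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()
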
%0 Conference Proceedings
%T Using Deep Mixture-of-Experts to Detect Word Meaning Shift for TempoWiC
%A Chen, Ze
%A Wang, Kangxu
%A Cai, Zijian
%A Zheng, Jiewen
%A He, Jiarong
%A Gao, Max
%A Zhang, Jason
%Y Barbieri, Francesco
%Y Camacho-Collados, Jose
%Y Dhingra, Bhuwan
%Y Espinosa-Anke, Luis
%Y Gribovskaya, Elena
%Y Lazaridou, Angeliki
%Y Loureiro, Daniel
%Y Neves, Leonardo
%S Proceedings of the First Workshop on Ever Evolving NLP (EvoNLP)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F chen-etal-2022-using
%X This paper describes the dma submission to the TempoWiC task, which achieves a macro-F1 score of 77.05% and ranks first in the task. We first explore the impact of different pre-trained language models. We then adopt data cleaning, data augmentation, and adversarial training strategies to improve model generalization and robustness. For further improvement, we integrate POS information and word semantic representations using a Mixture-of-Experts (MoE) approach. Experimental results show that the MoE approach overcomes the feature-overuse issue and combines context, POS, and word semantic features well. Additionally, we use a model ensemble for the final prediction, a method that prior work has shown to be effective.
%R 10.18653/v1/2022.evonlp-1.2
%U https://aclanthology.org/2022.evonlp-1.2/
%U https://doi.org/10.18653/v1/2022.evonlp-1.2
%P 7-11
Markdown (Informal)
[Using Deep Mixture-of-Experts to Detect Word Meaning Shift for TempoWiC](https://aclanthology.org/2022.evonlp-1.2/) (Chen et al., EvoNLP 2022)
ACL
Ze Chen, Kangxu Wang, Zijian Cai, Jiewen Zheng, Jiarong He, Max Gao, and Jason Zhang. 2022. [Using Deep Mixture-of-Experts to Detect Word Meaning Shift for TempoWiC](https://aclanthology.org/2022.evonlp-1.2/). In *Proceedings of the First Workshop on Ever Evolving NLP (EvoNLP)*, pages 7–11, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.
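
Finally, the abstract mentions a model ensemble for the final prediction. The simplest common variant is averaging class probabilities across fine-tuned checkpoints; whether the authors average probabilities, average logits, or vote is not stated in this record, so the following is an assumption. HuggingFace-style outputs with a .logits field and a dict-shaped batch are also assumed.

import torch

@torch.no_grad()
def ensemble_predict(models, batch):
    # Average softmax probabilities across models; each model is
    # assumed to already be in eval() mode.
    probs = [torch.softmax(m(**batch).logits, dim=-1) for m in models]
    mean_probs = torch.stack(probs).mean(dim=0)
    # TempoWiC is a binary decision: meaning shifted vs. unchanged.
    return mean_probs.argmax(dim=-1)
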