@inproceedings{li-etal-2022-improving-bilingual,
title = "Improving Bilingual Lexicon Induction with Cross-Encoder Reranking",
author = "Li, Yaoyiran and
Liu, Fangyu and
Vuli{\'c}, Ivan and
Korhonen, Anna",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-emnlp.302/",
doi = "10.18653/v1/2022.findings-emnlp.302",
pages = "4100--4116",
abstract = "Bilingual lexicon induction (BLI) with limited bilingual supervision is a crucial yet challenging task in multilingual NLP. Current state-of-the-art BLI methods rely on the induction of cross-lingual word embeddings (CLWEs) to capture cross-lingual word similarities; such CLWEs are obtained {\ensuremath{<}}b{\ensuremath{>}}1){\ensuremath{<}}/b{\ensuremath{>}} via traditional static models (e.g., VecMap), or {\ensuremath{<}}b{\ensuremath{>}}2){\ensuremath{<}}/b{\ensuremath{>}} by extracting type-level CLWEs from multilingual pretrained language models (mPLMs), or {\ensuremath{<}}b{\ensuremath{>}}3){\ensuremath{<}}/b{\ensuremath{>}} through combining the former two options. In this work, we propose a novel semi-supervised {\ensuremath{<}}i{\ensuremath{>}}post-hoc{\ensuremath{<}}/i{\ensuremath{>}} reranking method termed {\ensuremath{<}}b{\ensuremath{>}}BLICEr{\ensuremath{<}}/b{\ensuremath{>}} ({\ensuremath{<}}b{\ensuremath{>}}BLI{\ensuremath{<}}/b{\ensuremath{>}} with {\ensuremath{<}}b{\ensuremath{>}}C{\ensuremath{<}}/b{\ensuremath{>}}ross-{\ensuremath{<}}b{\ensuremath{>}}E{\ensuremath{<}}/b{\ensuremath{>}}ncoder {\ensuremath{<}}b{\ensuremath{>}}R{\ensuremath{<}}/b{\ensuremath{>}}eranking), applicable to any precalculated CLWE space, which improves their BLI capability. The key idea is to {\textquoteleft}extract' cross-lingual lexical knowledge from mPLMs, and then combine it with the original CLWEs. This crucial step is done via {\ensuremath{<}}b{\ensuremath{>}}1){\ensuremath{<}}/b{\ensuremath{>}} creating a word similarity dataset, comprising positive word pairs (i.e., true translations) and hard negative pairs induced from the original CLWE space, and then {\ensuremath{<}}b{\ensuremath{>}}2){\ensuremath{<}}/b{\ensuremath{>}} fine-tuning an mPLM (e.g., mBERT or XLM-R) in a cross-encoder manner to predict the similarity scores. At inference, we {\ensuremath{<}}b{\ensuremath{>}}3){\ensuremath{<}}/b{\ensuremath{>}} combine the similarity score from the original CLWE space with the score from the BLI-tuned cross-encoder. BLICEr establishes new state-of-the-art results on two standard BLI benchmarks spanning a wide spectrum of diverse languages: it substantially outperforms a series of strong baselines across the board. We also validate the robustness of BLICEr with different CLWEs."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="li-etal-2022-improving-bilingual">
<titleInfo>
<title>Improving Bilingual Lexicon Induction with Cross-Encoder Reranking</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaoyiran</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fangyu</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ivan</namePart>
<namePart type="family">Vulić</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Korhonen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2022</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yoav</namePart>
<namePart type="family">Goldberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zornitsa</namePart>
<namePart type="family">Kozareva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yue</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Bilingual lexicon induction (BLI) with limited bilingual supervision is a crucial yet challenging task in multilingual NLP. Current state-of-the-art BLI methods rely on the induction of cross-lingual word embeddings (CLWEs) to capture cross-lingual word similarities; such CLWEs are obtained 1) via traditional static models (e.g., VecMap), or 2) by extracting type-level CLWEs from multilingual pretrained language models (mPLMs), or 3) through combining the former two options. In this work, we propose a novel semi-supervised post-hoc reranking method termed BLICEr (BLI with Cross-Encoder Reranking), applicable to any precalculated CLWE space, which improves its BLI capability. The key idea is to ‘extract’ cross-lingual lexical knowledge from mPLMs, and then combine it with the original CLWEs. This crucial step is done via 1) creating a word similarity dataset, comprising positive word pairs (i.e., true translations) and hard negative pairs induced from the original CLWE space, and then 2) fine-tuning an mPLM (e.g., mBERT or XLM-R) in a cross-encoder manner to predict the similarity scores. At inference, we 3) combine the similarity score from the original CLWE space with the score from the BLI-tuned cross-encoder. BLICEr establishes new state-of-the-art results on two standard BLI benchmarks spanning a wide spectrum of diverse languages: it substantially outperforms a series of strong baselines across the board. We also validate the robustness of BLICEr with different CLWEs.</abstract>
<identifier type="citekey">li-etal-2022-improving-bilingual</identifier>
<identifier type="doi">10.18653/v1/2022.findings-emnlp.302</identifier>
<location>
<url>https://aclanthology.org/2022.findings-emnlp.302/</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>4100</start>
<end>4116</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Improving Bilingual Lexicon Induction with Cross-Encoder Reranking
%A Li, Yaoyiran
%A Liu, Fangyu
%A Vulić, Ivan
%A Korhonen, Anna
%Y Goldberg, Yoav
%Y Kozareva, Zornitsa
%Y Zhang, Yue
%S Findings of the Association for Computational Linguistics: EMNLP 2022
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F li-etal-2022-improving-bilingual
%X Bilingual lexicon induction (BLI) with limited bilingual supervision is a crucial yet challenging task in multilingual NLP. Current state-of-the-art BLI methods rely on the induction of cross-lingual word embeddings (CLWEs) to capture cross-lingual word similarities; such CLWEs are obtained 1) via traditional static models (e.g., VecMap), or 2) by extracting type-level CLWEs from multilingual pretrained language models (mPLMs), or 3) through combining the former two options. In this work, we propose a novel semi-supervised post-hoc reranking method termed BLICEr (BLI with Cross-Encoder Reranking), applicable to any precalculated CLWE space, which improves its BLI capability. The key idea is to ‘extract’ cross-lingual lexical knowledge from mPLMs, and then combine it with the original CLWEs. This crucial step is done via 1) creating a word similarity dataset, comprising positive word pairs (i.e., true translations) and hard negative pairs induced from the original CLWE space, and then 2) fine-tuning an mPLM (e.g., mBERT or XLM-R) in a cross-encoder manner to predict the similarity scores. At inference, we 3) combine the similarity score from the original CLWE space with the score from the BLI-tuned cross-encoder. BLICEr establishes new state-of-the-art results on two standard BLI benchmarks spanning a wide spectrum of diverse languages: it substantially outperforms a series of strong baselines across the board. We also validate the robustness of BLICEr with different CLWEs.
%R 10.18653/v1/2022.findings-emnlp.302
%U https://aclanthology.org/2022.findings-emnlp.302/
%U https://doi.org/10.18653/v1/2022.findings-emnlp.302
%P 4100-4116
Markdown (Informal)
[Improving Bilingual Lexicon Induction with Cross-Encoder Reranking](https://aclanthology.org/2022.findings-emnlp.302/) (Li et al., Findings 2022)
ACL
Yaoyiran Li, Fangyu Liu, Ivan Vulić, and Anna Korhonen. 2022. Improving Bilingual Lexicon Induction with Cross-Encoder Reranking. In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 4100–4116, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
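
For a concrete picture of the pipeline the abstract outlines, below is a minimal Python sketch of step 1) (mining hard negative pairs from the CLWE space) and step 3) (combining the original CLWE similarity with the cross-encoder score at inference). The function names, the interpolation weight lam, and the top-k mining scheme are illustrative assumptions, not the paper's released implementation; step 2), fine-tuning an mPLM such as mBERT or XLM-R as a cross-encoder, would supply the cross_encoder_score values used here.

import numpy as np

def cosine(u, v):
    """Cosine similarity between two word vectors."""
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

def mine_hard_negatives(src_vec, tgt_matrix, gold_idx, k=5):
    """Step 1): the k nearest target words in the CLWE space, excluding the
    gold translation, serve as hard negative pairs for the source word."""
    sims = tgt_matrix @ src_vec / (
        np.linalg.norm(tgt_matrix, axis=1) * np.linalg.norm(src_vec)
    )
    order = np.argsort(-sims)  # target indices, most similar first
    return [int(i) for i in order if i != gold_idx][:k]

def combined_score(clwe_sim, cross_encoder_score, lam=0.5):
    """Step 3): blend the CLWE similarity with the score of the BLI-tuned
    cross-encoder; the linear form and the value of lam are assumptions."""
    return lam * clwe_sim + (1.0 - lam) * cross_encoder_score

# Toy usage: rerank three candidate translations for one source word.
rng = np.random.default_rng(0)
src = rng.normal(size=300)          # source-word CLWE vector
tgts = rng.normal(size=(3, 300))    # candidate target-word CLWE vectors
ce = [0.9, 0.2, 0.4]                # scores from the fine-tuned cross-encoder
scores = [combined_score(cosine(src, t), s) for t, s in zip(tgts, ce)]
best = int(np.argmax(scores))       # index of the top-ranked candidate

Linear interpolation is the simplest way to combine two similarity signals of this kind; a weight like lam would presumably be tuned on the available seed dictionary.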