@inproceedings{zhang-etal-2024-paying,
    title = "Paying More Attention to Source Context: Mitigating Unfaithful Translations from Large Language Model",
    author = "Zhang, Hongbin and
      Chen, Kehai and
      Bai, Xuefeng and
      Xiang, Yang and
      Zhang, Min",
    editor = "Ku, Lun-Wei and
      Martins, Andre and
      Srikumar, Vivek",
    booktitle = "Findings of the Association for Computational Linguistics ACL 2024",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand and virtual meeting",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-acl.821",
    doi = "10.18653/v1/2024.findings-acl.821",
    pages = "13816--13836",
abstract = "Large language models (LLMs) have showcased their remarkable capabilities to handle various downstream tasks, including multilingual machine translation ability. Despite their impressive performance, decoder-only LLMs lack an explicit alignment between source and target contexts, leading to translation that may not faithfully represent the original content. To address this, we propose three learning strategies to encourage LLMs to pay more attention to the source context during translation: 1) adjusting attention weights on the source context by adaptive attention re-weighting; 2) suppressing the irrelevant target prefix using contrastive decoding; 3) avoiding excessive reliance on the target prefix through target-constrained tuning. To verify the effectiveness of our model, we curate a new dataset specifically focusing on unfaithful translations generated by LLMs. Experimental results on both human-collected and general test sets verify the effectiveness of our model across multiple language pairs. Further human evaluation demonstrates the efficacy of our method in reducing hallucinatory translation and improving the fidelity of translations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhang-etal-2024-paying">
<titleInfo>
<title>Paying More Attention to Source Context: Mitigating Unfaithful Translations from Large Language Model</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hongbin</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kehai</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xuefeng</namePart>
<namePart type="family">Bai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Xiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Min</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics ACL 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lun-Wei</namePart>
<namePart type="family">Ku</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andre</namePart>
<namePart type="family">Martins</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand and virtual meeting</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large language models (LLMs) have showcased their remarkable capabilities to handle various downstream tasks, including multilingual machine translation ability. Despite their impressive performance, decoder-only LLMs lack an explicit alignment between source and target contexts, leading to translation that may not faithfully represent the original content. To address this, we propose three learning strategies to encourage LLMs to pay more attention to the source context during translation: 1) adjusting attention weights on the source context by adaptive attention re-weighting; 2) suppressing the irrelevant target prefix using contrastive decoding; 3) avoiding excessive reliance on the target prefix through target-constrained tuning. To verify the effectiveness of our model, we curate a new dataset specifically focusing on unfaithful translations generated by LLMs. Experimental results on both human-collected and general test sets verify the effectiveness of our model across multiple language pairs. Further human evaluation demonstrates the efficacy of our method in reducing hallucinatory translation and improving the fidelity of translations.</abstract>
<identifier type="citekey">zhang-etal-2024-paying</identifier>
<identifier type="doi">10.18653/v1/2024.findings-acl.821</identifier>
<location>
<url>https://aclanthology.org/2024.findings-acl.821</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>13816</start>
<end>13836</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Paying More Attention to Source Context: Mitigating Unfaithful Translations from Large Language Model
%A Zhang, Hongbin
%A Chen, Kehai
%A Bai, Xuefeng
%A Xiang, Yang
%A Zhang, Min
%Y Ku, Lun-Wei
%Y Martins, Andre
%Y Srikumar, Vivek
%S Findings of the Association for Computational Linguistics ACL 2024
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand and virtual meeting
%F zhang-etal-2024-paying
%X Large language models (LLMs) have showcased remarkable capabilities in handling various downstream tasks, including multilingual machine translation. Despite their impressive performance, decoder-only LLMs lack an explicit alignment between source and target contexts, leading to translations that may not faithfully represent the original content. To address this, we propose three learning strategies to encourage LLMs to pay more attention to the source context during translation: 1) adjusting attention weights on the source context by adaptive attention re-weighting; 2) suppressing the irrelevant target prefix using contrastive decoding; 3) avoiding excessive reliance on the target prefix through target-constrained tuning. To verify the effectiveness of our method, we curate a new dataset specifically focusing on unfaithful translations generated by LLMs. Experimental results on both human-collected and general test sets confirm the effectiveness of our method across multiple language pairs. Further human evaluation demonstrates the efficacy of our approach in reducing hallucinatory translations and improving the fidelity of translations.
%R 10.18653/v1/2024.findings-acl.821
%U https://aclanthology.org/2024.findings-acl.821
%U https://doi.org/10.18653/v1/2024.findings-acl.821
%P 13816-13836
Markdown (Informal)
[Paying More Attention to Source Context: Mitigating Unfaithful Translations from Large Language Model](https://aclanthology.org/2024.findings-acl.821) (Zhang et al., Findings 2024)
ACL
Hongbin Zhang, Kehai Chen, Xuefeng Bai, Yang Xiang, and Min Zhang. 2024. [Paying More Attention to Source Context: Mitigating Unfaithful Translations from Large Language Model](https://aclanthology.org/2024.findings-acl.821). In *Findings of the Association for Computational Linguistics ACL 2024*, pages 13816–13836, Bangkok, Thailand and virtual meeting. Association for Computational Linguistics.
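
As an aside to the record above: the abstract's second strategy, suppressing the irrelevant target prefix with contrastive decoding, can be sketched roughly as follows. This is a minimal illustration only; the model name, prompt layout, weight `alpha`, greedy selection, and the function name `contrastive_next_token` are placeholder assumptions, not details taken from the paper.

```python
# Illustrative sketch: penalize next-token scores that the model assigns
# even WITHOUT the source sentence, so decoding is steered back toward
# source-faithful continuations.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL = "gpt2"  # stand-in model; the paper works with much larger LLMs
tok = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL)
model.eval()

@torch.no_grad()
def contrastive_next_token(source_prompt: str, target_prefix: str,
                           alpha: float = 0.5) -> str:
    """Pick the next token by contrasting source-conditioned scores
    against source-free (target-prefix-only) scores."""
    # Logits when the model sees the source plus the partial translation.
    full = tok(source_prompt + target_prefix, return_tensors="pt")
    full_logits = model(**full).logits[0, -1]
    # Logits when the model sees only the partial translation; tokens that
    # score highly here are driven by the target prefix, not the source.
    prefix = target_prefix if target_prefix else tok.bos_token
    free = tok(prefix, return_tensors="pt")
    free_logits = model(**free).logits[0, -1]
    # Subtracting the source-free scores suppresses prefix-driven tokens.
    return tok.decode(int((full_logits - alpha * free_logits).argmax()))

# Hypothetical usage: extend an English translation of a German sentence.
src = "Translate German to English.\nGerman: Der Hund schläft.\nEnglish: "
print(contrastive_next_token(src, "The dog"))
```

Here `alpha` controls how strongly prefix-only continuations are penalized; the paper's actual formulation and hyperparameters may differ.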