@inproceedings{bi-etal-2024-adaptive,
title = "Adaptive Token Biaser: Knowledge Editing via Biasing Key Entities",
author = "Bi, Baolong and
Liu, Shenghua and
Wang, Yiwei and
Mei, Lingrui and
Gao, Hongcheng and
Xu, Yilong and
Cheng, Xueqi",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-emnlp.647",
doi = "10.18653/v1/2024.findings-emnlp.647",
pages = "11071--11083",
abstract = "The parametric knowledge memorized by large language models (LLMs) becomes outdated quickly. In-context editing (ICE) is currently the most effective method for updating the knowledge of LLMs. Recent advancements involve enhancing ICE by modifying the decoding strategy, obviating the need for altering internal model structures or adjusting external prompts.However, this enhancement operates across the entire sequence generation, encompassing a plethora of non-critical tokens.In this work, we introduce **A**daptive **T**oken **Bias**er (ATBias), a new decoding technique designed to enhance ICE.It focuses on the tokens that are mostly related to knowledge during decoding, biasing their logits by matching key entities related to new and parametric knowledge.Experimental results show that ATBias significantly enhances ICE performance, achieving up to a 32.3{\%} improvement over state-of-the-art ICE methods while incurring only half the latency.ATBias not only improves the knowledge editing capabilities of ICE but can also be widely applied to LLMs with negligible cost.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="bi-etal-2024-adaptive">
    <titleInfo>
      <title>Adaptive Token Biaser: Knowledge Editing via Biasing Key Entities</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Baolong</namePart>
      <namePart type="family">Bi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Shenghua</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yiwei</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Lingrui</namePart>
      <namePart type="family">Mei</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hongcheng</namePart>
      <namePart type="family">Gao</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yilong</namePart>
      <namePart type="family">Xu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Xueqi</namePart>
      <namePart type="family">Cheng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2024</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Yaser</namePart>
        <namePart type="family">Al-Onaizan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohit</namePart>
        <namePart type="family">Bansal</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yun-Nung</namePart>
        <namePart type="family">Chen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Miami, Florida, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The parametric knowledge memorized by large language models (LLMs) becomes outdated quickly. In-context editing (ICE) is currently the most effective method for updating the knowledge of LLMs. Recent advancements enhance ICE by modifying the decoding strategy, obviating the need to alter internal model structures or adjust external prompts. However, this enhancement operates across the entire sequence generation, encompassing a plethora of non-critical tokens. In this work, we introduce Adaptive Token Biaser (ATBias), a new decoding technique designed to enhance ICE. ATBias focuses on the tokens most closely related to knowledge during decoding, biasing their logits by matching key entities related to new and parametric knowledge. Experimental results show that ATBias significantly enhances ICE performance, achieving up to a 32.3% improvement over state-of-the-art ICE methods while incurring only half the latency. ATBias not only improves the knowledge editing capabilities of ICE but can also be widely applied to LLMs at negligible cost.</abstract>
<identifier type="citekey">bi-etal-2024-adaptive</identifier>
<identifier type="doi">10.18653/v1/2024.findings-emnlp.647</identifier>
<location>
<url>https://aclanthology.org/2024.findings-emnlp.647</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>11071</start>
<end>11083</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Adaptive Token Biaser: Knowledge Editing via Biasing Key Entities
%A Bi, Baolong
%A Liu, Shenghua
%A Wang, Yiwei
%A Mei, Lingrui
%A Gao, Hongcheng
%A Xu, Yilong
%A Cheng, Xueqi
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Findings of the Association for Computational Linguistics: EMNLP 2024
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F bi-etal-2024-adaptive
%X The parametric knowledge memorized by large language models (LLMs) becomes outdated quickly. In-context editing (ICE) is currently the most effective method for updating the knowledge of LLMs. Recent advancements enhance ICE by modifying the decoding strategy, obviating the need to alter internal model structures or adjust external prompts. However, this enhancement operates across the entire sequence generation, encompassing a plethora of non-critical tokens. In this work, we introduce Adaptive Token Biaser (ATBias), a new decoding technique designed to enhance ICE. ATBias focuses on the tokens most closely related to knowledge during decoding, biasing their logits by matching key entities related to new and parametric knowledge. Experimental results show that ATBias significantly enhances ICE performance, achieving up to a 32.3% improvement over state-of-the-art ICE methods while incurring only half the latency. ATBias not only improves the knowledge editing capabilities of ICE but can also be widely applied to LLMs at negligible cost.
%R 10.18653/v1/2024.findings-emnlp.647
%U https://aclanthology.org/2024.findings-emnlp.647
%U https://doi.org/10.18653/v1/2024.findings-emnlp.647
%P 11071-11083
Markdown (Informal)
[Adaptive Token Biaser: Knowledge Editing via Biasing Key Entities](https://aclanthology.org/2024.findings-emnlp.647) (Bi et al., Findings 2024)
ACL
- Baolong Bi, Shenghua Liu, Yiwei Wang, Lingrui Mei, Hongcheng Gao, Yilong Xu, and Xueqi Cheng. 2024. Adaptive Token Biaser: Knowledge Editing via Biasing Key Entities. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 11071–11083, Miami, Florida, USA. Association for Computational Linguistics.
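
The abstract describes ATBias as a decoding-time technique that biases the logits of tokens matched to key entities. The sketch below is a minimal, unofficial illustration of that general mechanism using Hugging Face's `LogitsProcessor` hook; it is not the authors' ATBias implementation, and the entity set, the first-sub-token matching rule, the fixed bias value, and the toy GPT-2 setup are all assumptions made for demonstration.

```python
# Minimal, unofficial sketch of decoding-time logit biasing for key-entity
# tokens, in the spirit of what the abstract describes. NOT the authors'
# ATBias: the first-sub-token matching rule, the fixed bias value, and the
# toy GPT-2 prompt are all illustrative assumptions.
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          LogitsProcessor, LogitsProcessorList)


class KeyEntityBiaser(LogitsProcessor):
    """Adds a constant bonus to the logits of tokens that start a key entity."""

    def __init__(self, tokenizer, entities, bias=4.0):
        self.bias = bias
        # First sub-token id of each entity string (a crude stand-in for the
        # paper's matching of entities from new and parametric knowledge).
        self.token_ids = {
            tokenizer(e, add_special_tokens=False).input_ids[0] for e in entities
        }

    def __call__(self, input_ids, scores):
        # scores has shape (batch, vocab); nudge the matched tokens upward.
        for tid in self.token_ids:
            scores[:, tid] += self.bias
        return scores


tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# In-context edit: the new fact sits in the prompt, and decoding is biased
# toward the entity that the edit introduces.
prompt = ("New fact: The Eiffel Tower is located in Rome. "
          "Q: In which city is the Eiffel Tower? A:")
inputs = tok(prompt, return_tensors="pt")
out = model.generate(
    **inputs,
    max_new_tokens=8,
    do_sample=False,
    logits_processor=LogitsProcessorList([KeyEntityBiaser(tok, [" Rome"])]),
)
print(tok.decode(out[0][inputs.input_ids.shape[1]:], skip_special_tokens=True))
```

Where the paper's ATBias adaptively decides which knowledge-related tokens to bias during decoding, this sketch applies one fixed bonus to a fixed token set, so it conveys only the basic mechanism of decoding-time logit biasing.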