@inproceedings{lang-etal-2024-fine,
title = "Fine-Tuning Language Models with Reward Learning on Policy",
author = "Lang, Hao and
Huang, Fei and
Li, Yongbin",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.naacl-long.75",
doi = "10.18653/v1/2024.naacl-long.75",
pages = "1382--1392",
abstract = "Reinforcement learning from human feedback (RLHF) has emerged as an effective approach to aligning large language models (LLMs) to human preferences.RLHF contains three steps, i.e., human preference collecting, reward learning, and policy optimization, which are usually performed serially.Despite its popularity, however, (fixed) reward models may suffer from inaccurate off-distribution, since policy optimization continuously shifts LLMs{'} data distribution.Repeatedly collecting new preference data from the latest LLMs may alleviate this issue, which unfortunately makes the resulting system more complicated and difficult to optimize.In this paper, we propose reward learning on policy (RLP), an unsupervised framework that refines a reward model using policy samples to keep it on-distribution.Specifically, an unsupervised multi-view learning method is introduced to learn robust representations of policy samples.Meanwhile, a synthetic preference generation approach is developed to simulate high-quality preference data with policy outputs.Extensive experiments on three benchmark datasets show that RLP consistently outperforms the state-of-the-art.Our code is available at \url{https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/rlp}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lang-etal-2024-fine">
<titleInfo>
<title>Fine-Tuning Language Models with Reward Learning on Policy</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hao</namePart>
<namePart type="family">Lang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fei</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yongbin</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Duh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Helena</namePart>
<namePart type="family">Gomez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Bethard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Reinforcement learning from human feedback (RLHF) has emerged as an effective approach to aligning large language models (LLMs) to human preferences. RLHF contains three steps, i.e., human preference collecting, reward learning, and policy optimization, which are usually performed serially. Despite its popularity, however, (fixed) reward models may suffer from inaccurate off-distribution, since policy optimization continuously shifts LLMs’ data distribution. Repeatedly collecting new preference data from the latest LLMs may alleviate this issue, which unfortunately makes the resulting system more complicated and difficult to optimize. In this paper, we propose reward learning on policy (RLP), an unsupervised framework that refines a reward model using policy samples to keep it on-distribution. Specifically, an unsupervised multi-view learning method is introduced to learn robust representations of policy samples. Meanwhile, a synthetic preference generation approach is developed to simulate high-quality preference data with policy outputs. Extensive experiments on three benchmark datasets show that RLP consistently outperforms the state-of-the-art. Our code is available at https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/rlp.</abstract>
<identifier type="citekey">lang-etal-2024-fine</identifier>
<identifier type="doi">10.18653/v1/2024.naacl-long.75</identifier>
<location>
<url>https://aclanthology.org/2024.naacl-long.75</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>1382</start>
<end>1392</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Fine-Tuning Language Models with Reward Learning on Policy
%A Lang, Hao
%A Huang, Fei
%A Li, Yongbin
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F lang-etal-2024-fine
%X Reinforcement learning from human feedback (RLHF) has emerged as an effective approach to aligning large language models (LLMs) to human preferences. RLHF contains three steps, i.e., human preference collecting, reward learning, and policy optimization, which are usually performed serially. Despite its popularity, however, (fixed) reward models may suffer from inaccurate off-distribution, since policy optimization continuously shifts LLMs’ data distribution. Repeatedly collecting new preference data from the latest LLMs may alleviate this issue, which unfortunately makes the resulting system more complicated and difficult to optimize. In this paper, we propose reward learning on policy (RLP), an unsupervised framework that refines a reward model using policy samples to keep it on-distribution. Specifically, an unsupervised multi-view learning method is introduced to learn robust representations of policy samples. Meanwhile, a synthetic preference generation approach is developed to simulate high-quality preference data with policy outputs. Extensive experiments on three benchmark datasets show that RLP consistently outperforms the state-of-the-art. Our code is available at https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/rlp.
%R 10.18653/v1/2024.naacl-long.75
%U https://aclanthology.org/2024.naacl-long.75
%U https://doi.org/10.18653/v1/2024.naacl-long.75
%P 1382-1392
Markdown (Informal)
[Fine-Tuning Language Models with Reward Learning on Policy](https://aclanthology.org/2024.naacl-long.75) (Lang et al., NAACL 2024)
ACL
Hao Lang, Fei Huang, and Yongbin Li. 2024. Fine-Tuning Language Models with Reward Learning on Policy. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 1382–1392, Mexico City, Mexico. Association for Computational Linguistics.
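
The abstract describes an iterative idea: rather than keeping the reward model fixed while policy optimization shifts the LLM's output distribution, RLP periodically refines the reward model on samples drawn from the current policy, using synthetic preference pairs built from those samples. The sketch below is a minimal, hypothetical illustration of that loop only, not the authors' implementation (see the linked repository for the actual method): the response representations, the scoring rule used to form synthetic preference pairs, and all hyperparameters are placeholder assumptions, and the paper's multi-view representation learning step is omitted entirely.

```python
# Minimal, self-contained sketch of the general idea in the abstract:
# keep a reward model "on-distribution" by fine-tuning it on preference
# pairs built from fresh policy samples.  Everything here is illustrative;
# the real code is at https://github.com/AlibabaResearch/DAMO-ConvAI/tree/main/rlp
import torch
import torch.nn as nn
import torch.nn.functional as F


class RewardModel(nn.Module):
    """Toy reward head: maps a fixed-size response representation to a scalar reward."""

    def __init__(self, dim: int = 128):
        super().__init__()
        self.head = nn.Linear(dim, 1)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        return self.head(features).squeeze(-1)


def sample_policy_outputs(num_prompts: int, samples_per_prompt: int, dim: int) -> torch.Tensor:
    """Placeholder for decoding several responses per prompt from the current policy.
    Here we simply return random feature vectors of shape (prompts, samples, dim)."""
    return torch.randn(num_prompts, samples_per_prompt, dim)


def build_synthetic_preferences(samples: torch.Tensor):
    """Placeholder for synthetic preference generation: score each prompt's policy
    samples with some signal and pair the highest-scoring response against the
    lowest-scoring one.  The fixed random linear probe below is purely illustrative."""
    probe = torch.randn(samples.size(-1))
    scores = samples @ probe                      # (prompts, samples_per_prompt)
    best = scores.argmax(dim=1)
    worst = scores.argmin(dim=1)
    idx = torch.arange(samples.size(0))
    chosen = samples[idx, best]                   # preferred responses
    rejected = samples[idx, worst]                # dispreferred responses
    return chosen, rejected


def refine_reward_model(rm: RewardModel, chosen: torch.Tensor, rejected: torch.Tensor,
                        steps: int = 100, lr: float = 1e-3) -> None:
    """Fine-tune the reward model with the standard Bradley-Terry pairwise loss,
    -log sigmoid(r(chosen) - r(rejected)), on the synthetic preference pairs."""
    opt = torch.optim.Adam(rm.parameters(), lr=lr)
    for _ in range(steps):
        loss = -F.logsigmoid(rm(chosen) - rm(rejected)).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()


if __name__ == "__main__":
    rm = RewardModel()
    samples = sample_policy_outputs(num_prompts=64, samples_per_prompt=4, dim=128)
    chosen, rejected = build_synthetic_preferences(samples)
    refine_reward_model(rm, chosen, rejected)
    # The refined reward model would then feed back into the usual
    # policy-optimization step (e.g., PPO) of the RLHF pipeline.
```

In the actual pipeline, reward refinement and policy optimization would alternate, and the preference pairs would come from the paper's synthetic preference generation and multi-view representation learning rather than the toy scoring rule used here.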