@inproceedings{wang-etal-2023-distractor,
title = "Distractor Generation based on {T}ext2{T}ext Language Models with Pseudo {K}ullback-{L}eibler Divergence Regulation",
author = "Wang, Hui-Juan and
Hsieh, Kai-Yu and
Yu, Han-Cheng and
Tsou, Jui-Ching and
Shih, Yu An and
Huang, Chen-Hua and
Fan, Yao-Chung",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-acl.790",
doi = "10.18653/v1/2023.findings-acl.790",
pages = "12477--12491",
abstract = "In this paper, we address the task of cloze-style multiple choice question (MCQs) distractor generation. Our study is featured by the following designs. First, we propose to formulate the cloze distractor generation as a Text2Text task. Second, we propose pseudo Kullback-Leibler Divergence for regulating the generation to consider the item discrimination index in education evaluation. Third, we explore the candidate augmentation strategy and multi-tasking training with cloze-related tasks to further boost the generation performance. Through experiments with benchmarking datasets, our best perfomring model advances the state-of-the-art result from 10.81 to 22.00 (p@1 score).",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="wang-etal-2023-distractor">
    <titleInfo>
      <title>Distractor Generation based on Text2Text Language Models with Pseudo Kullback-Leibler Divergence Regulation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Hui-Juan</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kai-Yu</namePart>
      <namePart type="family">Hsieh</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Han-Cheng</namePart>
      <namePart type="family">Yu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jui-Ching</namePart>
      <namePart type="family">Tsou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yu</namePart>
      <namePart type="given">An</namePart>
      <namePart type="family">Shih</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Chen-Hua</namePart>
      <namePart type="family">Huang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yao-Chung</namePart>
      <namePart type="family">Fan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: ACL 2023</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Rogers</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jordan</namePart>
        <namePart type="family">Boyd-Graber</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Naoaki</namePart>
        <namePart type="family">Okazaki</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Toronto, Canada</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In this paper, we address the task of cloze-style multiple-choice question (MCQ) distractor generation. Our study features the following designs. First, we propose to formulate cloze distractor generation as a Text2Text task. Second, we propose a pseudo Kullback-Leibler divergence for regulating the generation to consider the item discrimination index in educational evaluation. Third, we explore a candidate augmentation strategy and multi-task training with cloze-related tasks to further boost generation performance. Through experiments with benchmark datasets, our best-performing model advances the state-of-the-art result from 10.81 to 22.00 (P@1 score).</abstract>
<identifier type="citekey">wang-etal-2023-distractor</identifier>
<identifier type="doi">10.18653/v1/2023.findings-acl.790</identifier>
<location>
<url>https://aclanthology.org/2023.findings-acl.790</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>12477</start>
<end>12491</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Distractor Generation based on Text2Text Language Models with Pseudo Kullback-Leibler Divergence Regulation
%A Wang, Hui-Juan
%A Hsieh, Kai-Yu
%A Yu, Han-Cheng
%A Tsou, Jui-Ching
%A Shih, Yu An
%A Huang, Chen-Hua
%A Fan, Yao-Chung
%Y Rogers, Anna
%Y Boyd-Graber, Jordan
%Y Okazaki, Naoaki
%S Findings of the Association for Computational Linguistics: ACL 2023
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F wang-etal-2023-distractor
%X In this paper, we address the task of cloze-style multiple-choice question (MCQ) distractor generation. Our study features the following designs. First, we propose to formulate cloze distractor generation as a Text2Text task. Second, we propose a pseudo Kullback-Leibler divergence for regulating the generation to consider the item discrimination index in educational evaluation. Third, we explore a candidate augmentation strategy and multi-task training with cloze-related tasks to further boost generation performance. Through experiments with benchmark datasets, our best-performing model advances the state-of-the-art result from 10.81 to 22.00 (P@1 score).
%R 10.18653/v1/2023.findings-acl.790
%U https://aclanthology.org/2023.findings-acl.790
%U https://doi.org/10.18653/v1/2023.findings-acl.790
%P 12477-12491
Markdown (Informal)
[Distractor Generation based on Text2Text Language Models with Pseudo Kullback-Leibler Divergence Regulation](https://aclanthology.org/2023.findings-acl.790) (Wang et al., Findings 2023)
ACL
Hui-Juan Wang, Kai-Yu Hsieh, Han-Cheng Yu, Jui-Ching Tsou, Yu An Shih, Chen-Hua Huang, and Yao-Chung Fan. 2023. [Distractor Generation based on Text2Text Language Models with Pseudo Kullback-Leibler Divergence Regulation](https://aclanthology.org/2023.findings-acl.790). In *Findings of the Association for Computational Linguistics: ACL 2023*, pages 12477–12491, Toronto, Canada. Association for Computational Linguistics.