@inproceedings{van-der-meer-etal-2022-will,
    title = "Will It Blend? Mixing Training Paradigms {\&} Prompting for Argument Quality Prediction",
    author = "van der Meer, Michiel and
      Reuver, Myrthe and
      Khurana, Urja and
      Krause, Lea and
      Baez Santamaria, Selene",
    editor = "Lapesa, Gabriella and
      Schneider, Jodi and
      Jo, Yohan and
      Saha, Sougata",
    booktitle = "Proceedings of the 9th Workshop on Argument Mining",
    month = oct,
    year = "2022",
    address = "Online and in Gyeongju, Republic of Korea",
    publisher = "International Conference on Computational Linguistics",
    url = "https://aclanthology.org/2022.argmining-1.8",
    pages = "95--103",
    abstract = "This paper describes our contributions to the Shared Task of the 9th Workshop on Argument Mining (2022). Our approach uses Large Language Models for the task of Argument Quality Prediction. We perform prompt engineering using GPT-3, and also investigate the training paradigms multi-task learning, contrastive learning, and intermediate-task training. We find that a mixed prediction setup outperforms single models. Prompting GPT-3 works best for predicting argument validity, and argument novelty is best estimated by a model trained using all three training paradigms.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="van-der-meer-etal-2022-will">
<titleInfo>
<title>Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Michiel</namePart>
<namePart type="family">van der Meer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Myrthe</namePart>
<namePart type="family">Reuver</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Urja</namePart>
<namePart type="family">Khurana</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lea</namePart>
<namePart type="family">Krause</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Selene</namePart>
<namePart type="family">Baez Santamaria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 9th Workshop on Argument Mining</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gabriella</namePart>
<namePart type="family">Lapesa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jodi</namePart>
<namePart type="family">Schneider</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yohan</namePart>
<namePart type="family">Jo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sougata</namePart>
<namePart type="family">Saha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>International Conference on Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and in Gyeongju, Republic of Korea</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes our contributions to the Shared Task of the 9th Workshop on Argument Mining (2022). Our approach uses Large Language Models for the task of Argument Quality Prediction. We perform prompt engineering using GPT-3, and also investigate the training paradigms multi-task learning, contrastive learning, and intermediate-task training. We find that a mixed prediction setup outperforms single models. Prompting GPT-3 works best for predicting argument validity, and argument novelty is best estimated by a model trained using all three training paradigms.</abstract>
<identifier type="citekey">van-der-meer-etal-2022-will</identifier>
<location>
<url>https://aclanthology.org/2022.argmining-1.8</url>
</location>
<part>
<date>2022-10</date>
<extent unit="page">
<start>95</start>
<end>103</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction
%A van der Meer, Michiel
%A Reuver, Myrthe
%A Khurana, Urja
%A Krause, Lea
%A Baez Santamaria, Selene
%Y Lapesa, Gabriella
%Y Schneider, Jodi
%Y Jo, Yohan
%Y Saha, Sougata
%S Proceedings of the 9th Workshop on Argument Mining
%D 2022
%8 October
%I International Conference on Computational Linguistics
%C Online and in Gyeongju, Republic of Korea
%F van-der-meer-etal-2022-will
%X This paper describes our contributions to the Shared Task of the 9th Workshop on Argument Mining (2022). Our approach uses Large Language Models for the task of Argument Quality Prediction. We perform prompt engineering using GPT-3, and also investigate the training paradigms multi-task learning, contrastive learning, and intermediate-task training. We find that a mixed prediction setup outperforms single models. Prompting GPT-3 works best for predicting argument validity, and argument novelty is best estimated by a model trained using all three training paradigms.
%U https://aclanthology.org/2022.argmining-1.8
%P 95-103
Markdown (Informal)
[Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction](https://aclanthology.org/2022.argmining-1.8) (van der Meer et al., ArgMining 2022)
ACL
Michiel van der Meer, Myrthe Reuver, Urja Khurana, Lea Krause, and Selene Baez Santamaria. 2022. Will It Blend? Mixing Training Paradigms & Prompting for Argument Quality Prediction. In Proceedings of the 9th Workshop on Argument Mining, pages 95–103, Online and in Gyeongju, Republic of Korea. International Conference on Computational Linguistics.