@inproceedings{betz-richardson-2022-deepa2,
title = "{D}eep{A}2: A Modular Framework for Deep Argument Analysis with Pretrained Neural {T}ext2{T}ext Language Models",
author = "Betz, Gregor and
Richardson, Kyle",
editor = "Nastase, Vivi and
Pavlick, Ellie and
Pilehvar, Mohammad Taher and
Camacho-Collados, Jose and
Raganato, Alessandro",
booktitle = "Proceedings of the 11th Joint Conference on Lexical and Computational Semantics",
month = jul,
year = "2022",
address = "Seattle, Washington",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.starsem-1.2/",
doi = "10.18653/v1/2022.starsem-1.2",
pages = "12--27",
    abstract = "In this paper, we present and implement a multi-dimensional, modular framework for performing deep argument analysis (DeepA2) using current pre-trained language models (PTLMs). ArgumentAnalyst {--} a T5 model [Raffel et al. 2020] set up and trained within DeepA2 {--} reconstructs argumentative texts, which advance an informal argumentation, as valid arguments: It inserts, e.g., missing premises and conclusions, formalizes inferences, and coherently links the logical reconstruction to the source text. We create a synthetic corpus for deep argument analysis, and evaluate ArgumentAnalyst on this new dataset as well as on existing data, specifically EntailmentBank [Dalvi et al. 2021]. Our empirical findings vindicate the overall framework and highlight the advantages of a modular design, in particular its ability to emulate established heuristics (such as hermeneutic cycles), to explore the model's uncertainty, to cope with the plurality of correct solutions (underdetermination), and to exploit higher-order evidence."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="betz-richardson-2022-deepa2">
<titleInfo>
<title>DeepA2: A Modular Framework for Deep Argument Analysis with Pretrained Neural Text2Text Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gregor</namePart>
<namePart type="family">Betz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kyle</namePart>
<namePart type="family">Richardson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 11th Joint Conference on Lexical and Computational Semantics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Vivi</namePart>
<namePart type="family">Nastase</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ellie</namePart>
<namePart type="family">Pavlick</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jose</namePart>
<namePart type="family">Camacho-Collados</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Raganato</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, Washington</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
  <abstract>In this paper, we present and implement a multi-dimensional, modular framework for performing deep argument analysis (DeepA2) using current pre-trained language models (PTLMs). ArgumentAnalyst – a T5 model [Raffel et al. 2020] set up and trained within DeepA2 – reconstructs argumentative texts, which advance an informal argumentation, as valid arguments: It inserts, e.g., missing premises and conclusions, formalizes inferences, and coherently links the logical reconstruction to the source text. We create a synthetic corpus for deep argument analysis, and evaluate ArgumentAnalyst on this new dataset as well as on existing data, specifically EntailmentBank [Dalvi et al. 2021]. Our empirical findings vindicate the overall framework and highlight the advantages of a modular design, in particular its ability to emulate established heuristics (such as hermeneutic cycles), to explore the model’s uncertainty, to cope with the plurality of correct solutions (underdetermination), and to exploit higher-order evidence.</abstract>
<identifier type="citekey">betz-richardson-2022-deepa2</identifier>
<identifier type="doi">10.18653/v1/2022.starsem-1.2</identifier>
<location>
<url>https://aclanthology.org/2022.starsem-1.2/</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>12</start>
<end>27</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T DeepA2: A Modular Framework for Deep Argument Analysis with Pretrained Neural Text2Text Language Models
%A Betz, Gregor
%A Richardson, Kyle
%Y Nastase, Vivi
%Y Pavlick, Ellie
%Y Pilehvar, Mohammad Taher
%Y Camacho-Collados, Jose
%Y Raganato, Alessandro
%S Proceedings of the 11th Joint Conference on Lexical and Computational Semantics
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, Washington
%F betz-richardson-2022-deepa2
%X In this paper, we present and implement a multi-dimensional, modular framework for performing deep argument analysis (DeepA2) using current pre-trained language models (PTLMs). ArgumentAnalyst – a T5 model [Raffel et al. 2020] set up and trained within DeepA2 – reconstructs argumentative texts, which advance an informal argumentation, as valid arguments: It inserts, e.g., missing premises and conclusions, formalizes inferences, and coherently links the logical reconstruction to the source text. We create a synthetic corpus for deep argument analysis, and evaluate ArgumentAnalyst on this new dataset as well as on existing data, specifically EntailmentBank [Dalvi et al. 2021]. Our empirical findings vindicate the overall framework and highlight the advantages of a modular design, in particular its ability to emulate established heuristics (such as hermeneutic cycles), to explore the model’s uncertainty, to cope with the plurality of correct solutions (underdetermination), and to exploit higher-order evidence.
%R 10.18653/v1/2022.starsem-1.2
%U https://aclanthology.org/2022.starsem-1.2/
%U https://doi.org/10.18653/v1/2022.starsem-1.2
%P 12-27
Markdown (Informal)
[DeepA2: A Modular Framework for Deep Argument Analysis with Pretrained Neural Text2Text Language Models](https://aclanthology.org/2022.starsem-1.2/) (Betz & Richardson, *SEM 2022)
ACL