@inproceedings{luo-etal-2023-deepblueai,
title = "{D}eep{B}lue{AI} at {P}rag{T}ag-2023:Ensemble-based Text Classification Approaches under Limited Data Resources",
author = "Luo, Zhipeng and
Wang, Jiahui and
Guo, Yihao",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.23/",
doi = "10.18653/v1/2023.argmining-1.23",
pages = "202--206",
abstract = "Due to the scarcity of review data and the high annotation cost, in this paper, we primarily delve into the fine-tuning of pretrained models using limited data. To enhance the robustness of the model, we employ adversarial training techniques. By introducing subtle perturbations, we compel the model to better cope with adversarial attacks, thereby increasing the stability of the model in input data. We utilize pooling techniques to aid the model in extracting critical information, reducing computational complexity, and improving the model`s generalization capability. Experimental results demonstrate the effectiveness of our proposed approach on a review paper dataset with limited data volume."
}
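The abstract above mentions adversarial training via subtle perturbations but does not name a specific method. A common choice matching that description when fine-tuning pretrained text classifiers is FGM (the Fast Gradient Method), which perturbs the embedding weights along the gradient direction at each step. The sketch below is a minimal, hypothetical PyTorch version, assuming a Hugging Face-style model whose embedding parameters contain "word_embeddings" in their names; the class name, `epsilon` value, and parameter filter are illustrative assumptions, not details taken from the paper.

```python
import torch

class FGM:
    """Fast Gradient Method: perturb embedding weights along the gradient,
    run a second forward/backward pass, then restore the clean weights.
    (Hypothetical sketch; the paper does not specify its exact method.)"""

    def __init__(self, model, epsilon=1.0, emb_name="word_embeddings"):
        self.model = model
        self.epsilon = epsilon    # perturbation radius (assumed value)
        self.emb_name = emb_name  # substring identifying embedding params
        self.backup = {}

    def attack(self):
        # Add an L2-normalized, gradient-direction perturbation in place.
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name and param.grad is not None:
                self.backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    param.data.add_(self.epsilon * param.grad / norm)

    def restore(self):
        # Put the clean embedding weights back before the optimizer update.
        for name, param in self.model.named_parameters():
            if name in self.backup:
                param.data = self.backup[name]
        self.backup = {}

def train_step(model, fgm, optimizer, batch):
    """One step: clean forward/backward, adversarial forward/backward on
    perturbed embeddings, then a single optimizer update on the summed grads."""
    model(**batch).loss.backward()   # gradients on the clean batch
    fgm.attack()                     # perturb embeddings using those gradients
    model(**batch).loss.backward()   # accumulate adversarial gradients
    fgm.restore()
    optimizer.step()
    optimizer.zero_grad()
```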
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="luo-etal-2023-deepblueai">
    <titleInfo>
      <title>DeepBlueAI at PragTag-2023:Ensemble-based Text Classification Approaches under Limited Data Resources</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Zhipeng</namePart>
      <namePart type="family">Luo</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jiahui</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yihao</namePart>
      <namePart type="family">Guo</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 10th Workshop on Argument Mining</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Milad</namePart>
        <namePart type="family">Alshomary</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Chung-Chi</namePart>
        <namePart type="family">Chen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Smaranda</namePart>
        <namePart type="family">Muresan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Joonsuk</namePart>
        <namePart type="family">Park</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Julia</namePart>
        <namePart type="family">Romberg</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Singapore</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Due to the scarcity of review data and the high cost of annotation, in this paper we focus on fine-tuning pretrained models with limited data. To enhance the robustness of the model, we employ adversarial training: by introducing subtle perturbations during training, we compel the model to better cope with adversarial attacks, thereby improving its stability on the input data. We also utilize pooling techniques to help the model extract critical information, reduce computational complexity, and improve generalization. Experimental results demonstrate the effectiveness of the proposed approach on a review-paper dataset with a limited volume of data.</abstract>
    <identifier type="citekey">luo-etal-2023-deepblueai</identifier>
    <identifier type="doi">10.18653/v1/2023.argmining-1.23</identifier>
    <location>
      <url>https://aclanthology.org/2023.argmining-1.23/</url>
    </location>
    <part>
      <date>2023-12</date>
      <extent unit="page">
        <start>202</start>
        <end>206</end>
      </extent>
    </part>
  </mods>
</modsCollection>
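The abstract also credits pooling with helping the model extract critical information, without saying which pooling operator was used. Masked mean pooling over the encoder's token representations is one common option for sentence-level classification; the function below sketches it under that assumption (the name and shapes are illustrative, not taken from the paper).

```python
import torch

def masked_mean_pool(last_hidden_state, attention_mask):
    """Average only the non-padding token vectors, yielding one fixed-size
    vector per sequence. Shapes: (B, T, H) hidden states, (B, T) 0/1 mask."""
    mask = attention_mask.unsqueeze(-1).float()     # (B, T, 1)
    summed = (last_hidden_state * mask).sum(dim=1)  # (B, H)
    counts = mask.sum(dim=1).clamp(min=1e-9)        # (B, 1), avoid div by zero
    return summed / counts                          # (B, H)
```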
%0 Conference Proceedings
%T DeepBlueAI at PragTag-2023:Ensemble-based Text Classification Approaches under Limited Data Resources
%A Luo, Zhipeng
%A Wang, Jiahui
%A Guo, Yihao
%Y Alshomary, Milad
%Y Chen, Chung-Chi
%Y Muresan, Smaranda
%Y Park, Joonsuk
%Y Romberg, Julia
%S Proceedings of the 10th Workshop on Argument Mining
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F luo-etal-2023-deepblueai
%X Due to the scarcity of review data and the high cost of annotation, in this paper we focus on fine-tuning pretrained models with limited data. To enhance the robustness of the model, we employ adversarial training: by introducing subtle perturbations during training, we compel the model to better cope with adversarial attacks, thereby improving its stability on the input data. We also utilize pooling techniques to help the model extract critical information, reduce computational complexity, and improve generalization. Experimental results demonstrate the effectiveness of the proposed approach on a review-paper dataset with a limited volume of data.
%R 10.18653/v1/2023.argmining-1.23
%U https://aclanthology.org/2023.argmining-1.23/
%U https://doi.org/10.18653/v1/2023.argmining-1.23
%P 202-206
Markdown (Informal)
[DeepBlueAI at PragTag-2023:Ensemble-based Text Classification Approaches under Limited Data Resources](https://aclanthology.org/2023.argmining-1.23/) (Luo et al., ArgMining 2023)
ACL
Zhipeng Luo, Jiahui Wang, and Yihao Guo. 2023. DeepBlueAI at PragTag-2023:Ensemble-based Text Classification Approaches under Limited Data Resources. In Proceedings of the 10th Workshop on Argument Mining, pages 202–206, Singapore. Association for Computational Linguistics.