BibTeX
@article{zeng-etal-2023-certified,
title = "Certified Robustness to Text Adversarial Attacks by Randomized [{MASK}]",
author = "Zeng, Jiehang and
Xu, Jianhan and
Zheng, Xiaoqing and
Huang, Xuanjing",
journal = "Computational Linguistics",
volume = "49",
number = "2",
month = jun,
year = "2023",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/2023.cl-2.5/",
doi = "10.1162/coli_a_00476",
pages = "395--427",
abstract = "Very recently, a few certified defense methods have been developed to provably guarantee the robustness of a text classifier against adversarial synonym substitutions. However, all existing certified defense methods assume that the defenders know how the adversaries generate synonyms, which is not a realistic scenario. In this study, we propose a certifiably robust defense method that randomly masks a certain proportion of the words in an input text, so that the above unrealistic assumption is no longer necessary. The proposed method can defend against not only word substitution-based attacks but also character-level perturbations. We can certify the classifications of over 50{\%} of texts to be robust to any perturbation of five words on the AGNEWS dataset and of two words on the SST2 dataset. The experimental results show that our randomized smoothing method significantly outperforms recently proposed defense methods across multiple datasets under different attack algorithms."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zeng-etal-2023-certified">
<titleInfo>
<title>Certified Robustness to Text Adversarial Attacks by Randomized [MASK]</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jiehang</namePart>
<namePart type="family">Zeng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jianhan</namePart>
<namePart type="family">Xu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiaoqing</namePart>
<namePart type="family">Zheng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xuanjing</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
<abstract>Very recently, a few certified defense methods have been developed to provably guarantee the robustness of a text classifier against adversarial synonym substitutions. However, all existing certified defense methods assume that the defenders know how the adversaries generate synonyms, which is not a realistic scenario. In this study, we propose a certifiably robust defense method that randomly masks a certain proportion of the words in an input text, so that the above unrealistic assumption is no longer necessary. The proposed method can defend against not only word substitution-based attacks but also character-level perturbations. We can certify the classifications of over 50% of texts to be robust to any perturbation of five words on the AGNEWS dataset and of two words on the SST2 dataset. The experimental results show that our randomized smoothing method significantly outperforms recently proposed defense methods across multiple datasets under different attack algorithms.</abstract>
<identifier type="citekey">zeng-etal-2023-certified</identifier>
<identifier type="doi">10.1162/coli_a_00476</identifier>
<location>
<url>https://aclanthology.org/2023.cl-2.5/</url>
</location>
<part>
<date>2023-06</date>
<detail type="volume"><number>49</number></detail>
<detail type="issue"><number>2</number></detail>
<extent unit="page">
<start>395</start>
<end>427</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Journal Article
%T Certified Robustness to Text Adversarial Attacks by Randomized [MASK]
%A Zeng, Jiehang
%A Xu, Jianhan
%A Zheng, Xiaoqing
%A Huang, Xuanjing
%J Computational Linguistics
%D 2023
%8 June
%V 49
%N 2
%I MIT Press
%C Cambridge, MA
%F zeng-etal-2023-certified
%X Very recently, a few certified defense methods have been developed to provably guarantee the robustness of a text classifier against adversarial synonym substitutions. However, all existing certified defense methods assume that the defenders know how the adversaries generate synonyms, which is not a realistic scenario. In this study, we propose a certifiably robust defense method that randomly masks a certain proportion of the words in an input text, so that the above unrealistic assumption is no longer necessary. The proposed method can defend against not only word substitution-based attacks but also character-level perturbations. We can certify the classifications of over 50% of texts to be robust to any perturbation of five words on the AGNEWS dataset and of two words on the SST2 dataset. The experimental results show that our randomized smoothing method significantly outperforms recently proposed defense methods across multiple datasets under different attack algorithms.
%R 10.1162/coli_a_00476
%U https://aclanthology.org/2023.cl-2.5/
%U https://doi.org/10.1162/coli_a_00476
%P 395-427
Markdown (Informal)
[Certified Robustness to Text Adversarial Attacks by Randomized [MASK]](https://aclanthology.org/2023.cl-2.5/) (Zeng et al., CL 2023)
ACL
Jiehang Zeng, Jianhan Xu, Xiaoqing Zheng, and Xuanjing Huang. 2023. Certified Robustness to Text Adversarial Attacks by Randomized [MASK]. Computational Linguistics, 49(2):395–427.
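
The defense summarized in the abstract has a simple core: classify many randomly masked copies of the input and aggregate the predictions by majority vote. The sketch below illustrates only that smoothing step; it is not the authors' implementation. The function names (`random_mask`, `smoothed_predict`), the masking rate of 0.3, the sample count of 100, and the whitespace tokenization are all illustrative assumptions, and the paper's certification procedure (deriving a provable robustness radius from the vote statistics) is omitted.

```python
import random

MASK_TOKEN = "[MASK]"  # placeholder token, as in BERT-style masked language models

def random_mask(words, mask_rate=0.3, rng=random):
    """Replace a randomly chosen proportion of tokens with the mask token."""
    if not words:
        return []
    n_mask = max(1, int(len(words) * mask_rate))
    masked_idx = set(rng.sample(range(len(words)), n_mask))
    return [MASK_TOKEN if i in masked_idx else w for i, w in enumerate(words)]

def smoothed_predict(classify, text, num_samples=100, mask_rate=0.3):
    """Majority vote over the base classifier's predictions on randomly
    masked copies of the input (the smoothing step described in the abstract)."""
    words = text.split()
    votes = {}
    for _ in range(num_samples):
        masked_text = " ".join(random_mask(words, mask_rate))
        label = classify(masked_text)  # `classify` is any base text classifier
        votes[label] = votes.get(label, 0) + 1
    return max(votes, key=votes.get)
```

With `classify` bound to any text classifier that returns a label, `smoothed_predict` returns the majority label over the masked samples; in the paper, the vote counts are further used to certify robustness against bounded word- and character-level perturbations.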