@inproceedings{yang-etal-2022-textpruner,
title = "{T}ext{P}runer: A Model Pruning Toolkit for Pre-Trained Language Models",
author = "Yang, Ziqing and
Cui, Yiming and
Chen, Zhigang",
editor = "Basile, Valerio and
Kozareva, Zornitsa and
Stajner, Sanja",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics: System Demonstrations",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.acl-demo.4/",
doi = "10.18653/v1/2022.acl-demo.4",
pages = "35--43",
abstract = "Pre-trained language models have been prevailed in natural language processing and become the backbones of many NLP tasks, but the demands for computational resources have limited their applications. In this paper, we introduce TextPruner, an open-source model pruning toolkit designed for pre-trained language models, targeting fast and easy model compression. TextPruner offers structured post-training pruning methods, including vocabulary pruning and transformer pruning, and can be applied to various models and tasks. We also propose a self-supervised pruning method that can be applied without the labeled data. Our experiments with several NLP tasks demonstrate the ability of TextPruner to reduce the model size without re-training the model."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yang-etal-2022-textpruner">
<titleInfo>
<title>TextPruner: A Model Pruning Toolkit for Pre-Trained Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ziqing</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yiming</namePart>
<namePart type="family">Cui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhigang</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics: System Demonstrations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Valerio</namePart>
<namePart type="family">Basile</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zornitsa</namePart>
<namePart type="family">Kozareva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sanja</namePart>
<namePart type="family">Stajner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Pre-trained language models have prevailed in natural language processing and become the backbones of many NLP tasks, but their demands for computational resources have limited their applications. In this paper, we introduce TextPruner, an open-source model pruning toolkit designed for pre-trained language models, targeting fast and easy model compression. TextPruner offers structured post-training pruning methods, including vocabulary pruning and transformer pruning, and can be applied to various models and tasks. We also propose a self-supervised pruning method that can be applied without labeled data. Our experiments with several NLP tasks demonstrate the ability of TextPruner to reduce the model size without re-training the model.</abstract>
<identifier type="citekey">yang-etal-2022-textpruner</identifier>
<identifier type="doi">10.18653/v1/2022.acl-demo.4</identifier>
<location>
<url>https://aclanthology.org/2022.acl-demo.4/</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>35</start>
<end>43</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T TextPruner: A Model Pruning Toolkit for Pre-Trained Language Models
%A Yang, Ziqing
%A Cui, Yiming
%A Chen, Zhigang
%Y Basile, Valerio
%Y Kozareva, Zornitsa
%Y Stajner, Sanja
%S Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics: System Demonstrations
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F yang-etal-2022-textpruner
%X Pre-trained language models have prevailed in natural language processing and become the backbones of many NLP tasks, but their demands for computational resources have limited their applications. In this paper, we introduce TextPruner, an open-source model pruning toolkit designed for pre-trained language models, targeting fast and easy model compression. TextPruner offers structured post-training pruning methods, including vocabulary pruning and transformer pruning, and can be applied to various models and tasks. We also propose a self-supervised pruning method that can be applied without labeled data. Our experiments with several NLP tasks demonstrate the ability of TextPruner to reduce the model size without re-training the model.
%R 10.18653/v1/2022.acl-demo.4
%U https://aclanthology.org/2022.acl-demo.4/
%U https://doi.org/10.18653/v1/2022.acl-demo.4
%P 35-43
Markdown (Informal)
[TextPruner: A Model Pruning Toolkit for Pre-Trained Language Models](https://aclanthology.org/2022.acl-demo.4/) (Yang et al., ACL 2022)
ACL
Ziqing Yang, Yiming Cui, and Zhigang Chen. 2022. TextPruner: A Model Pruning Toolkit for Pre-Trained Language Models. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 35–43, Dublin, Ireland. Association for Computational Linguistics.
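
The abstract above describes post-training vocabulary pruning and transformer pruning. As a minimal, hedged sketch of how such a toolkit is typically invoked, the snippet below prunes a model's vocabulary down to the tokens seen in a task corpus; the class and argument names (`VocabularyPruner`, `dataiter`, `save_model`) reflect my reading of the TextPruner documentation and should be treated as assumptions rather than verbatim API from this citation record.

```python
# Hedged sketch of post-training vocabulary pruning with TextPruner.
# Names such as VocabularyPruner, dataiter, and save_model are assumptions
# based on the toolkit's public documentation, not this citation record.
from transformers import XLMRobertaForSequenceClassification, XLMRobertaTokenizer
from textpruner import VocabularyPruner

model = XLMRobertaForSequenceClassification.from_pretrained("xlm-roberta-base")
tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

# Sentences drawn from the target task; tokens that never appear here are
# dropped from the embedding matrix and the tokenizer, shrinking the model
# without any re-training.
texts = [
    "TextPruner removes unused vocabulary entries after training.",
    "Only tokens appearing in the task corpus are kept.",
]

pruner = VocabularyPruner(model, tokenizer)
pruner.prune(dataiter=texts, save_model=True)  # saves the pruned model and tokenizer
```

Transformer pruning follows the same post-training pattern but removes attention heads and feed-forward neurons scored on task data; per the abstract, the self-supervised variant can do this without labeled data.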