@article{yan-etal-2022-hierarchical,
title = "Hierarchical Interpretation of Neural Text Classification",
author = "Yan, Hanqi and
Gui, Lin and
He, Yulan",
journal = "Computational Linguistics",
volume = "48",
number = "4",
month = dec,
year = "2022",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/2022.cl-4.17/",
doi = "10.1162/coli_a_00459",
pages = "987--1020",
abstract = "Recent years have witnessed increasing interest in developing interpretable models in Natural Language Processing (NLP). Most existing models aim at identifying input features such as words or phrases important for model predictions. Neural models developed in NLP, however, often compose word semantics in a hierarchical manner. As such, interpretation by words or phrases only cannot faithfully explain model decisions in text classification. This article proposes a novel Hierarchical Interpretable Neural Text classifier, called HINT, which can automatically generate explanations of model predictions in the form of label-associated topics in a hierarchical manner. Model interpretation is no longer at the word level, but built on topics as the basic semantic unit. Experimental results on both review datasets and news datasets show that our proposed approach achieves text classification results on par with existing state-of-the-art text classifiers, and generates interpretations more faithful to model predictions and better understood by humans than other interpretable neural text classifiers."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yan-etal-2022-hierarchical">
<titleInfo>
<title>Hierarchical Interpretation of Neural Text Classification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hanqi</namePart>
<namePart type="family">Yan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lin</namePart>
<namePart type="family">Gui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yulan</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
<abstract>Recent years have witnessed increasing interest in developing interpretable models in Natural Language Processing (NLP). Most existing models aim at identifying input features such as words or phrases important for model predictions. Neural models developed in NLP, however, often compose word semantics in a hierarchical manner. As such, interpretation by words or phrases only cannot faithfully explain model decisions in text classification. This article proposes a novel Hierarchical Interpretable Neural Text classifier, called HINT, which can automatically generate explanations of model predictions in the form of label-associated topics in a hierarchical manner. Model interpretation is no longer at the word level, but built on topics as the basic semantic unit. Experimental results on both review datasets and news datasets show that our proposed approach achieves text classification results on par with existing state-of-the-art text classifiers, and generates interpretations more faithful to model predictions and better understood by humans than other interpretable neural text classifiers.</abstract>
<identifier type="citekey">yan-etal-2022-hierarchical</identifier>
<identifier type="doi">10.1162/coli_a_00459</identifier>
<location>
<url>https://aclanthology.org/2022.cl-4.17/</url>
</location>
<part>
<date>2022-12</date>
<detail type="volume"><number>48</number></detail>
<detail type="issue"><number>4</number></detail>
<extent unit="page">
<start>987</start>
<end>1020</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Journal Article
%T Hierarchical Interpretation of Neural Text Classification
%A Yan, Hanqi
%A Gui, Lin
%A He, Yulan
%J Computational Linguistics
%D 2022
%8 December
%V 48
%N 4
%I MIT Press
%C Cambridge, MA
%F yan-etal-2022-hierarchical
%X Recent years have witnessed increasing interest in developing interpretable models in Natural Language Processing (NLP). Most existing models aim at identifying input features such as words or phrases important for model predictions. Neural models developed in NLP, however, often compose word semantics in a hierarchical manner. As such, interpretation by words or phrases only cannot faithfully explain model decisions in text classification. This article proposes a novel Hierarchical Interpretable Neural Text classifier, called HINT, which can automatically generate explanations of model predictions in the form of label-associated topics in a hierarchical manner. Model interpretation is no longer at the word level, but built on topics as the basic semantic unit. Experimental results on both review datasets and news datasets show that our proposed approach achieves text classification results on par with existing state-of-the-art text classifiers, and generates interpretations more faithful to model predictions and better understood by humans than other interpretable neural text classifiers.
%R 10.1162/coli_a_00459
%U https://aclanthology.org/2022.cl-4.17/
%U https://doi.org/10.1162/coli_a_00459
%P 987-1020
Markdown (Informal)
[Hierarchical Interpretation of Neural Text Classification](https://aclanthology.org/2022.cl-4.17/) (Yan et al., CL 2022)
ACL