@inproceedings{khullar-arora-2020-mast,
    title = "{MAST}: Multimodal Abstractive Summarization with Trimodal Hierarchical Attention",
    author = "Khullar, Aman and
      Arora, Udit",
    editor = "Castellucci, Giuseppe and
      Filice, Simone and
      Poria, Soujanya and
      Cambria, Erik and
      Specia, Lucia",
    booktitle = "Proceedings of the First International Workshop on Natural Language Processing Beyond Text",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.nlpbt-1.7",
    doi = "10.18653/v1/2020.nlpbt-1.7",
    pages = "60--69",
    abstract = "This paper presents MAST, a new model for Multimodal Abstractive Text Summarization that utilizes information from all three modalities {--} text, audio and video {--} in a multimodal video. Prior work on multimodal abstractive text summarization only utilized information from the text and video modalities. We examine the usefulness and challenges of deriving information from the audio modality and present a sequence-to-sequence trimodal hierarchical attention-based model that overcomes these challenges by letting the model pay more attention to the text modality. MAST outperforms the current state of the art model (video-text) by 2.51 points in terms of Content F1 score and 1.00 points in terms of Rouge-L score on the How2 dataset for multimodal language understanding.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="khullar-arora-2020-mast">
<titleInfo>
<title>MAST: Multimodal Abstractive Summarization with Trimodal Hierarchical Attention</title>
</titleInfo>
<name type="personal">
<namePart type="given">Aman</namePart>
<namePart type="family">Khullar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Udit</namePart>
<namePart type="family">Arora</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First International Workshop on Natural Language Processing Beyond Text</title>
</titleInfo>
<name type="personal">
<namePart type="given">Giuseppe</namePart>
<namePart type="family">Castellucci</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Simone</namePart>
<namePart type="family">Filice</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Soujanya</namePart>
<namePart type="family">Poria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Erik</namePart>
<namePart type="family">Cambria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucia</namePart>
<namePart type="family">Specia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper presents MAST, a new model for Multimodal Abstractive Text Summarization that utilizes information from all three modalities – text, audio and video – in a multimodal video. Prior work on multimodal abstractive text summarization only utilized information from the text and video modalities. We examine the usefulness and challenges of deriving information from the audio modality and present a sequence-to-sequence trimodal hierarchical attention-based model that overcomes these challenges by letting the model pay more attention to the text modality. MAST outperforms the current state of the art model (video-text) by 2.51 points in terms of Content F1 score and 1.00 points in terms of Rouge-L score on the How2 dataset for multimodal language understanding.</abstract>
<identifier type="citekey">khullar-arora-2020-mast</identifier>
<identifier type="doi">10.18653/v1/2020.nlpbt-1.7</identifier>
<location>
<url>https://aclanthology.org/2020.nlpbt-1.7</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>60</start>
<end>69</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T MAST: Multimodal Abstractive Summarization with Trimodal Hierarchical Attention
%A Khullar, Aman
%A Arora, Udit
%Y Castellucci, Giuseppe
%Y Filice, Simone
%Y Poria, Soujanya
%Y Cambria, Erik
%Y Specia, Lucia
%S Proceedings of the First International Workshop on Natural Language Processing Beyond Text
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F khullar-arora-2020-mast
%X This paper presents MAST, a new model for Multimodal Abstractive Text Summarization that utilizes information from all three modalities – text, audio and video – in a multimodal video. Prior work on multimodal abstractive text summarization only utilized information from the text and video modalities. We examine the usefulness and challenges of deriving information from the audio modality and present a sequence-to-sequence trimodal hierarchical attention-based model that overcomes these challenges by letting the model pay more attention to the text modality. MAST outperforms the current state of the art model (video-text) by 2.51 points in terms of Content F1 score and 1.00 points in terms of Rouge-L score on the How2 dataset for multimodal language understanding.
%R 10.18653/v1/2020.nlpbt-1.7
%U https://aclanthology.org/2020.nlpbt-1.7
%U https://doi.org/10.18653/v1/2020.nlpbt-1.7
%P 60-69
Markdown (Informal)
[MAST: Multimodal Abstractive Summarization with Trimodal Hierarchical Attention](https://aclanthology.org/2020.nlpbt-1.7) (Khullar & Arora, nlpbt 2020)
ACL
Aman Khullar and Udit Arora. 2020. MAST: Multimodal Abstractive Summarization with Trimodal Hierarchical Attention. In Proceedings of the First International Workshop on Natural Language Processing Beyond Text, pages 60–69, Online. Association for Computational Linguistics.
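
For readers skimming the abstract above: the paper describes a sequence-to-sequence summarizer whose decoder attends hierarchically over text, audio, and video encoder states. The snippet below is only a minimal, hypothetical NumPy sketch of that two-level (within-modality, then across-modality) attention pattern; the function names, dot-product scoring, and dimensions are assumptions made for illustration and do not reproduce the authors' implementation, which additionally biases attention toward the text modality.

```python
import numpy as np

def softmax(x, axis=-1):
    # numerically stable softmax
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def dot_attention(query, keys):
    # query: (d,), keys: (T, d) -> context vector (d,) and weights (T,)
    scores = keys @ query / np.sqrt(keys.shape[-1])
    weights = softmax(scores)
    return weights @ keys, weights

def trimodal_hierarchical_attention(query, text_h, audio_h, video_h):
    # Level 1: attend within each modality's encoder states.
    contexts = [dot_attention(query, h)[0] for h in (text_h, audio_h, video_h)]
    # Level 2: attend across the three per-modality context vectors.
    fused, modality_weights = dot_attention(query, np.stack(contexts))
    return fused, modality_weights

# Toy usage: hidden size d = 8, different sequence lengths per modality.
rng = np.random.default_rng(0)
q = rng.standard_normal(8)                 # a decoder query state (assumed)
fused, w = trimodal_hierarchical_attention(
    q,
    rng.standard_normal((12, 8)),          # text encoder states
    rng.standard_normal((20, 8)),          # audio encoder states
    rng.standard_normal((15, 8)),          # video encoder states
)
print(fused.shape, w)                      # (8,) and the 3 modality weights
```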