@inproceedings{upadhyay-etal-2022-automatic,
  title     = {Automatic Summarization for Creative Writing: {BART} based Pipeline Method for Generating Summary of Movie Scripts},
  author    = {Upadhyay, Aditya and
               Bhavsar, Nidhir and
               Bhatnagar, Aakash and
               Singh, Muskaan and
               Motlicek, Petr},
  editor    = {McKeown, Kathleen},
  booktitle = {Proceedings of The Workshop on Automatic Summarization for Creative Writing},
  month     = oct,
  year      = {2022},
  address   = {Gyeongju, Republic of Korea},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.creativesumm-1.7/},
  pages     = {44--50},
  abstract  = {This paper documents our approach for the Creative-Summ 2022 shared task for Automatic Summarization of Creative Writing. For this purpose, we develop an automatic summarization pipeline where we leverage a denoising autoencoder for pretraining sequence-to-sequence models and fine-tune it on a large-scale abstractive screenplay summarization dataset to summarize TV transcripts from primetime shows. Our pipeline divides the input transcript into smaller conversational blocks, removes redundant text, summarises the conversational blocks, obtains the block-wise summaries, cleans, structures, and then integrates the summaries to create the meeting minutes. Our proposed system achieves some of the best scores across multiple metrics (lexical, semantical) in the Creative-Summ shared task.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="upadhyay-etal-2022-automatic">
<titleInfo>
<title>Automatic Summarization for Creative Writing: BART based Pipeline Method for Generating Summary of Movie Scripts</title>
</titleInfo>
<name type="personal">
<namePart type="given">Aditya</namePart>
<namePart type="family">Upadhyay</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nidhir</namePart>
<namePart type="family">Bhavsar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aakash</namePart>
<namePart type="family">Bhatnagar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Muskaan</namePart>
<namePart type="family">Singh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Petr</namePart>
<namePart type="family">Motlicek</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of The Workshop on Automatic Summarization for Creative Writing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kathleen</namePart>
<namePart type="family">McKeown</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Gyeongju, Republic of Korea</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper documents our approach for the Creative-Summ 2022 shared task for Automatic Summarization of Creative Writing. For this purpose, we develop an automatic summarization pipeline where we leverage a denoising autoencoder for pretraining sequence-to-sequence models and fine-tune it on a large-scale abstractive screenplay summarization dataset to summarize TV transcripts from primetime shows. Our pipeline divides the input transcript into smaller conversational blocks, removes redundant text, summarises the conversational blocks, obtains the block-wise summaries, cleans, structures, and then integrates the summaries to create the meeting minutes. Our proposed system achieves some of the best scores across multiple metrics (lexical, semantical) in the Creative-Summ shared task.</abstract>
<identifier type="citekey">upadhyay-etal-2022-automatic</identifier>
<location>
<url>https://aclanthology.org/2022.creativesumm-1.7/</url>
</location>
<part>
<date>2022-10</date>
<extent unit="page">
<start>44</start>
<end>50</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Automatic Summarization for Creative Writing: BART based Pipeline Method for Generating Summary of Movie Scripts
%A Upadhyay, Aditya
%A Bhavsar, Nidhir
%A Bhatnagar, Aakash
%A Singh, Muskaan
%A Motlicek, Petr
%Y McKeown, Kathleen
%S Proceedings of The Workshop on Automatic Summarization for Creative Writing
%D 2022
%8 October
%I Association for Computational Linguistics
%C Gyeongju, Republic of Korea
%F upadhyay-etal-2022-automatic
%X This paper documents our approach for the Creative-Summ 2022 shared task for Automatic Summarization of Creative Writing. For this purpose, we develop an automatic summarization pipeline where we leverage a denoising autoencoder for pretraining sequence-to-sequence models and fine-tune it on a large-scale abstractive screenplay summarization dataset to summarize TV transcripts from primetime shows. Our pipeline divides the input transcript into smaller conversational blocks, removes redundant text, summarises the conversational blocks, obtains the block-wise summaries, cleans, structures, and then integrates the summaries to create the meeting minutes. Our proposed system achieves some of the best scores across multiple metrics (lexical, semantical) in the Creative-Summ shared task.
%U https://aclanthology.org/2022.creativesumm-1.7/
%P 44-50
Markdown (Informal)
[Automatic Summarization for Creative Writing: BART based Pipeline Method for Generating Summary of Movie Scripts](https://aclanthology.org/2022.creativesumm-1.7/) (Upadhyay et al., CreativeSumm 2022)
ACL