@inproceedings{mitsui-etal-2024-pslm,
title = "{PSLM}: Parallel Generation of Text and Speech with {LLM}s for Low-Latency Spoken Dialogue Systems",
author = "Mitsui, Kentaro and
Mitsuda, Koh and
Wakatsuki, Toshiaki and
Hono, Yukiya and
Sawada, Kei",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-emnlp.151",
doi = "10.18653/v1/2024.findings-emnlp.151",
pages = "2692--2700",
abstract = "Multimodal language models that process both text and speech have a potential for applications in spoken dialogue systems. However, current models face two major challenges in response generation latency: (1) generating a spoken response requires the prior generation of a written response, and (2) speech sequences are significantly longer than text sequences. This study addresses these issues by extending the input and output sequences of the language model to support the parallel generation of text and speech. Our experiments on spoken question answering tasks demonstrate that our approach improves latency while maintaining the quality of response content. Additionally, we show that latency can be further reduced by generating speech in multiple sequences. Demo samples are available at https://rinnakk.github.io/research/publications/PSLM.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="mitsui-etal-2024-pslm">
<titleInfo>
<title>PSLM: Parallel Generation of Text and Speech with LLMs for Low-Latency Spoken Dialogue Systems</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kentaro</namePart>
<namePart type="family">Mitsui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Koh</namePart>
<namePart type="family">Mitsuda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Toshiaki</namePart>
<namePart type="family">Wakatsuki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yukiya</namePart>
<namePart type="family">Hono</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kei</namePart>
<namePart type="family">Sawada</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Al-Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Multimodal language models that process both text and speech have a potential for applications in spoken dialogue systems. However, current models face two major challenges in response generation latency: (1) generating a spoken response requires the prior generation of a written response, and (2) speech sequences are significantly longer than text sequences. This study addresses these issues by extending the input and output sequences of the language model to support the parallel generation of text and speech. Our experiments on spoken question answering tasks demonstrate that our approach improves latency while maintaining the quality of response content. Additionally, we show that latency can be further reduced by generating speech in multiple sequences. Demo samples are available at https://rinnakk.github.io/research/publications/PSLM.</abstract>
<identifier type="citekey">mitsui-etal-2024-pslm</identifier>
<identifier type="doi">10.18653/v1/2024.findings-emnlp.151</identifier>
<location>
<url>https://aclanthology.org/2024.findings-emnlp.151</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>2692</start>
<end>2700</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T PSLM: Parallel Generation of Text and Speech with LLMs for Low-Latency Spoken Dialogue Systems
%A Mitsui, Kentaro
%A Mitsuda, Koh
%A Wakatsuki, Toshiaki
%A Hono, Yukiya
%A Sawada, Kei
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Findings of the Association for Computational Linguistics: EMNLP 2024
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F mitsui-etal-2024-pslm
%X Multimodal language models that process both text and speech have potential for applications in spoken dialogue systems. However, current models face two major challenges in response generation latency: (1) generating a spoken response requires the prior generation of a written response, and (2) speech sequences are significantly longer than text sequences. This study addresses these issues by extending the input and output sequences of the language model to support the parallel generation of text and speech. Our experiments on spoken question answering tasks demonstrate that our approach improves latency while maintaining the quality of response content. Additionally, we show that latency can be further reduced by generating speech in multiple sequences. Demo samples are available at https://rinnakk.github.io/research/publications/PSLM.
%R 10.18653/v1/2024.findings-emnlp.151
%U https://aclanthology.org/2024.findings-emnlp.151
%U https://doi.org/10.18653/v1/2024.findings-emnlp.151
%P 2692-2700
Markdown (Informal)
[PSLM: Parallel Generation of Text and Speech with LLMs for Low-Latency Spoken Dialogue Systems](https://aclanthology.org/2024.findings-emnlp.151) (Mitsui et al., Findings 2024)
ACL
Kentaro Mitsui, Koh Mitsuda, Toshiaki Wakatsuki, Yukiya Hono, and Kei Sawada. 2024. [PSLM: Parallel Generation of Text and Speech with LLMs for Low-Latency Spoken Dialogue Systems](https://aclanthology.org/2024.findings-emnlp.151). In *Findings of the Association for Computational Linguistics: EMNLP 2024*, pages 2692–2700, Miami, Florida, USA. Association for Computational Linguistics.
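
For readers skimming only this page, the mechanism the abstract describes can be pictured in a few lines of code. The sketch below is hypothetical and written from the abstract alone, not from the authors' released implementation: the toy model, vocabulary sizes, number of speech streams, and all function names are invented for illustration.

```python
# Hypothetical sketch of PSLM-style parallel decoding (NOT the authors' code).
# Idea from the abstract: each step emits one text token AND one token per
# speech stream, so speech output need not wait for the full written response.
import random


class ToyParallelLM:
    """Stand-in for a language model whose output sequence is extended to
    cover one text stream plus several speech-token streams per step."""

    def __init__(self, num_speech_streams=2, eos_token_id=0):
        self.num_speech_streams = num_speech_streams
        self.eos_token_id = eos_token_id

    def step(self, state):
        # A real PSLM would run a transformer forward pass here; this toy
        # model just emits random token ids from invented vocabularies.
        text_token = random.randint(0, 31)
        speech_tokens = [random.randint(0, 1023)
                         for _ in range(self.num_speech_streams)]
        return text_token, speech_tokens, state


def decode_parallel(model, max_steps=16):
    """Greedy loop producing text and speech tokens in lockstep."""
    text = []
    speech = [[] for _ in range(model.num_speech_streams)]
    state = None
    for _ in range(max_steps):
        text_token, speech_tokens, state = model.step(state)
        text.append(text_token)
        for stream, token in zip(speech, speech_tokens):
            stream.append(token)
        # Speech tokens could be flushed to a vocoder right here, which is
        # where the latency win over text-then-speech pipelines comes from.
        if text_token == model.eos_token_id:
            break
    return text, speech


if __name__ == "__main__":
    text, speech = decode_parallel(ToyParallelLM())
    print(len(text), [len(s) for s in speech])
```

Each step yields one text token together with one token per speech stream, so speech can start streaming immediately rather than after the full written response; splitting speech across several parallel streams shortens each sequence, which is the further latency reduction the abstract mentions.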