@inproceedings{jiqunchu-lin-2024-incorporating,
title = "Incorporating Exponential Smoothing into {MLP}: a Simple but Effective Sequence Model",
author = "JiqunChu, JiqunChu and
Lin, Zuoquan",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-naacl.23",
doi = "10.18653/v1/2024.findings-naacl.23",
pages = "326--337",
abstract = "Modeling long-range dependencies in sequential data is a crucial step in sequence learning. A recently developed model, the Structured State Space (S4), demonstrated significant effectiveness in modeling long-range sequences. However, It is unclear whether the success of S4 can be attributed to its intricate parameterization and HiPPO initialization or simply due to State Space Models (SSMs). To further investigate the potential of the deep SSMs, we start with exponential smoothing (ETS), a simple SSM, and propose a stacked architecture by directly incorporating it into an element-wise MLP. We augment simple ETS with additional parameters and complex field to reduce the inductive bias. Despite increasing less than 1{\%} of parameters of element-wise MLP, our models achieve comparable results to S4 on the LRA benchmark.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jiqunchu-lin-2024-incorporating">
<titleInfo>
<title>Incorporating Exponential Smoothing into MLP: a Simple but Effective Sequence Model</title>
</titleInfo>
<name type="personal">
<namePart type="given">JiqunChu</namePart>
<namePart type="family">JiqunChu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zuoquan</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: NAACL 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Duh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Helena</namePart>
<namePart type="family">Gomez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Bethard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Modeling long-range dependencies in sequential data is a crucial step in sequence learning. A recently developed model, the Structured State Space (S4), demonstrated significant effectiveness in modeling long-range sequences. However, It is unclear whether the success of S4 can be attributed to its intricate parameterization and HiPPO initialization or simply due to State Space Models (SSMs). To further investigate the potential of the deep SSMs, we start with exponential smoothing (ETS), a simple SSM, and propose a stacked architecture by directly incorporating it into an element-wise MLP. We augment simple ETS with additional parameters and complex field to reduce the inductive bias. Despite increasing less than 1% of parameters of element-wise MLP, our models achieve comparable results to S4 on the LRA benchmark.</abstract>
<identifier type="citekey">jiqunchu-lin-2024-incorporating</identifier>
<identifier type="doi">10.18653/v1/2024.findings-naacl.23</identifier>
<location>
<url>https://aclanthology.org/2024.findings-naacl.23</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>326</start>
<end>337</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Incorporating Exponential Smoothing into MLP: a Simple but Effective Sequence Model
%A Chu, Jiqun
%A Lin, Zuoquan
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Findings of the Association for Computational Linguistics: NAACL 2024
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F jiqunchu-lin-2024-incorporating
%X Modeling long-range dependencies in sequential data is a crucial step in sequence learning. A recently developed model, the Structured State Space (S4), demonstrated significant effectiveness in modeling long-range sequences. However, it is unclear whether the success of S4 can be attributed to its intricate parameterization and HiPPO initialization, or simply to the use of State Space Models (SSMs). To further investigate the potential of deep SSMs, we start with exponential smoothing (ETS), a simple SSM, and propose a stacked architecture that directly incorporates it into an element-wise MLP. We augment simple ETS with additional parameters and a complex field to reduce the inductive bias. Despite adding less than 1% to the parameter count of the element-wise MLP, our models achieve results comparable to S4 on the LRA benchmark.
%R 10.18653/v1/2024.findings-naacl.23
%U https://aclanthology.org/2024.findings-naacl.23
%U https://doi.org/10.18653/v1/2024.findings-naacl.23
%P 326-337
Markdown (Informal)
[Incorporating Exponential Smoothing into MLP: a Simple but Effective Sequence Model](https://aclanthology.org/2024.findings-naacl.23) (Chu & Lin, Findings 2024)
ACL
Jiqun Chu and Zuoquan Lin. 2024. Incorporating Exponential Smoothing into MLP: a Simple but Effective Sequence Model. In *Findings of the Association for Computational Linguistics: NAACL 2024*, pages 326–337, Mexico City, Mexico. Association for Computational Linguistics.
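
The abstract's starting point, simple exponential smoothing, is the recurrence s_t = α·x_t + (1 − α)·s_{t−1}. Below is a minimal NumPy sketch of that recurrence for readers unfamiliar with ETS; the function name and the single scalar α are illustrative assumptions for this sketch, not the paper's actual model (which augments ETS with additional parameters and a complex field inside an element-wise MLP).

```python
import numpy as np

def exponential_smoothing(x, alpha=0.3):
    # Classic ETS recurrence: s_t = alpha * x_t + (1 - alpha) * s_{t-1}.
    # Viewed as a state space model, the smoothed value s_t is the state,
    # and alpha controls how quickly the influence of past inputs decays.
    s = np.empty_like(x, dtype=float)
    s[0] = x[0]
    for t in range(1, len(x)):
        s[t] = alpha * x[t] + (1 - alpha) * s[t - 1]
    return s

# Example: smooth a short sequence.
x = np.array([1.0, 3.0, 2.0, 5.0, 4.0])
print(exponential_smoothing(x, alpha=0.3))
```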