@inproceedings{lee-etal-2023-mafid,
title = "{MAF}i{D}: Moving Average Equipped Fusion-in-Decoder for Question Answering over Tabular and Textual Data",
author = "Lee, Sung-Min and
Park, Eunhwan and
Seo, Daeryong and
Jeon, Donghyeon and
Kang, Inho and
Na, Seung-Hoon",
editor = "Vlachos, Andreas and
Augenstein, Isabelle",
booktitle = "Findings of the Association for Computational Linguistics: EACL 2023",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-eacl.177/",
doi = "10.18653/v1/2023.findings-eacl.177",
pages = "2337--2344",
abstract = "Transformer-based models for question answering (QA) over tables and texts confront a {\textquotedblleft}long{\textquotedblright} hybrid sequence over tabular and textual elements, causing long-range reasoning problems. To handle long-range reasoning, we extensively employ a fusion-in-decoder (FiD) and exponential moving average (EMA), proposing a Moving Average Equipped Fusion-in-Decoder (\textbf{MAFiD}). With FiD as the backbone architecture, MAFiD combines various levels of reasoning: \textit{independent encoding} of homogeneous data and \textit{single-row} and \textit{multi-row heterogeneous reasoning}, using a \textit{gated cross attention layer} to effectively aggregate the three types of representations resulting from various reasonings. Experimental results on HybridQA indicate that MAFiD achieves state-of-the-art performance by increasing exact matching (EM) and F1 by 1.1 and 1.7, respectively, on the blind test set."
}
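The abstract describes MAFiD's central mechanism: a gated cross-attention layer inside a FiD decoder that aggregates three kinds of encoder representations (independent homogeneous encodings, single-row heterogeneous reasoning, and multi-row heterogeneous reasoning). As a rough illustration only, here is a minimal PyTorch sketch of such gated aggregation; the class name, tensor shapes, and per-type sigmoid gates are assumptions for this sketch, not the authors' released implementation.

```python
# Hypothetical sketch (not the paper's code): a gated cross-attention
# layer that lets decoder states attend over three representation types
# and mixes the results with learned sigmoid gates.
import torch
import torch.nn as nn

class GatedCrossAttentionAggregator(nn.Module):
    """Aggregates homogeneous, single-row, and multi-row encodings.

    All names and shapes are illustrative; the paper only states that a
    gated cross-attention layer fuses the three representation types
    inside a fusion-in-decoder.
    """

    def __init__(self, d_model: int, n_heads: int = 8):
        super().__init__()
        # One cross-attention block per representation type.
        self.attns = nn.ModuleList(
            nn.MultiheadAttention(d_model, n_heads, batch_first=True)
            for _ in range(3)
        )
        # A scalar gate per target position, computed from the decoder state.
        self.gates = nn.ModuleList(nn.Linear(d_model, 1) for _ in range(3))
        self.out = nn.Linear(d_model, d_model)

    def forward(self, dec, homo, single_row, multi_row):
        # dec:                 (batch, tgt_len, d_model) decoder hidden states
        # homo/single_row/...: (batch, src_len, d_model) encoder outputs
        fused = 0
        for attn, gate, mem in zip(
            self.attns, self.gates, (homo, single_row, multi_row)
        ):
            ctx, _ = attn(dec, mem, mem)   # cross-attend to one memory type
            g = torch.sigmoid(gate(dec))   # gate in (0, 1), shape (B, T, 1)
            fused = fused + g * ctx        # gated mixture of contexts
        return self.out(fused)

# Toy usage with random tensors.
agg = GatedCrossAttentionAggregator(d_model=64)
dec = torch.randn(2, 5, 64)
mems = [torch.randn(2, 11, 64) for _ in range(3)]
print(agg(dec, *mems).shape)  # torch.Size([2, 5, 64])
```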
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="lee-etal-2023-mafid">
    <titleInfo>
      <title>MAFiD: Moving Average Equipped Fusion-in-Decoder for Question Answering over Tabular and Textual Data</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Sung-Min</namePart>
      <namePart type="family">Lee</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Eunhwan</namePart>
      <namePart type="family">Park</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Daeryong</namePart>
      <namePart type="family">Seo</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Donghyeon</namePart>
      <namePart type="family">Jeon</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Inho</namePart>
      <namePart type="family">Kang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Seung-Hoon</namePart>
      <namePart type="family">Na</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EACL 2023</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Andreas</namePart>
        <namePart type="family">Vlachos</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Isabelle</namePart>
        <namePart type="family">Augenstein</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Dubrovnik, Croatia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Transformer-based models for question answering (QA) over tables and texts confront a “long” hybrid sequence over tabular and textual elements, causing long-range reasoning problems. To handle long-range reasoning, we extensively employ a fusion-in-decoder (FiD) and exponential moving average (EMA), proposing a Moving Average Equipped Fusion-in-Decoder (MAFiD). With FiD as the backbone architecture, MAFiD combines various levels of reasoning: independent encoding of homogeneous data and single-row and multi-row heterogeneous reasoning, using a gated cross attention layer to effectively aggregate the three types of representations resulting from various reasonings. Experimental results on HybridQA indicate that MAFiD achieves state-of-the-art performance by increasing exact matching (EM) and F1 by 1.1 and 1.7, respectively, on the blind test set.</abstract>
    <identifier type="citekey">lee-etal-2023-mafid</identifier>
    <identifier type="doi">10.18653/v1/2023.findings-eacl.177</identifier>
    <location>
      <url>https://aclanthology.org/2023.findings-eacl.177/</url>
    </location>
    <part>
      <date>2023-05</date>
      <extent unit="page">
        <start>2337</start>
        <end>2344</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T MAFiD: Moving Average Equipped Fusion-in-Decoder for Question Answering over Tabular and Textual Data
%A Lee, Sung-Min
%A Park, Eunhwan
%A Seo, Daeryong
%A Jeon, Donghyeon
%A Kang, Inho
%A Na, Seung-Hoon
%Y Vlachos, Andreas
%Y Augenstein, Isabelle
%S Findings of the Association for Computational Linguistics: EACL 2023
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F lee-etal-2023-mafid
%X Transformer-based models for question answering (QA) over tables and texts confront a “long” hybrid sequence over tabular and textual elements, causing long-range reasoning problems. To handle long-range reasoning, we extensively employ a fusion-in-decoder (FiD) and exponential moving average (EMA), proposing a Moving Average Equipped Fusion-in-Decoder (MAFiD). With FiD as the backbone architecture, MAFiD combines various levels of reasoning: independent encoding of homogeneous data and single-row and multi-row heterogeneous reasoning, using a gated cross attention layer to effectively aggregate the three types of representations resulting from various reasonings. Experimental results on HybridQA indicate that MAFiD achieves state-of-the-art performance by increasing exact matching (EM) and F1 by 1.1 and 1.7, respectively, on the blind test set.
%R 10.18653/v1/2023.findings-eacl.177
%U https://aclanthology.org/2023.findings-eacl.177/
%U https://doi.org/10.18653/v1/2023.findings-eacl.177
%P 2337-2344
Markdown (Informal)
[MAFiD: Moving Average Equipped Fusion-in-Decoder for Question Answering over Tabular and Textual Data](https://aclanthology.org/2023.findings-eacl.177/) (Lee et al., Findings 2023)
ACL
Sung-Min Lee, Eunhwan Park, Daeryong Seo, Donghyeon Jeon, Inho Kang, and Seung-Hoon Na. 2023. MAFiD: Moving Average Equipped Fusion-in-Decoder for Question Answering over Tabular and Textual Data. In Findings of the Association for Computational Linguistics: EACL 2023, pages 2337–2344, Dubrovnik, Croatia. Association for Computational Linguistics.