@inproceedings{qi-etal-2022-dureadervis,
title = "$\textrm{DuReader}_{\textrm{vis}}$: A {C}hinese Dataset for Open-domain Document Visual Question Answering",
author = "Qi, Le and
Lv, Shangwen and
Li, Hongyu and
Liu, Jing and
Zhang, Yu and
She, Qiaoqiao and
Wu, Hua and
Wang, Haifeng and
Liu, Ting",
editor = "Muresan, Smaranda and
Nakov, Preslav and
Villavicencio, Aline",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2022",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-acl.105",
doi = "10.18653/v1/2022.findings-acl.105",
pages = "1338--1351",
abstract = "Open-domain question answering has been used in a wide range of applications, such as web search and enterprise search, which usually takes clean texts extracted from various formats of documents (e.g., web pages, PDFs, or Word documents) as the information source. However, designing different text extraction approaches is time-consuming and not scalable. In order to reduce human cost and improve the scalability of QA systems, we propose and study an $\textbf{Open-domain}$ $\textbf{Doc}$ument $\textbf{V}$isual $\textbf{Q}$uestion $\textbf{A}$nswering (Open-domain DocVQA) task, which requires answering questions based on a collection of document images directly instead of only document texts, utilizing layouts and visual features additionally. Towards this end, we introduce the first Chinese Open-domain DocVQA dataset called $\textrm{DuReader}_{\textrm{vis}}$, containing about 15K question-answering pairs and 158K document images from the Baidu search engine. There are three main challenges in $\textrm{DuReader}_{\textrm{vis}}$: (1) long document understanding, (2) noisy texts, and (3) multi-span answer extraction. The extensive experiments demonstrate that the dataset is challenging. Additionally, we propose a simple approach that incorporates the layout and visual features, and the experimental results show the effectiveness of the proposed approach. The dataset and code will be publicly available at \url{https://github.com/baidu/DuReader/tree/master/DuReader-vis}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="qi-etal-2022-dureadervis">
<titleInfo>
<title>DuReader_vis: A Chinese Dataset for Open-domain Document Visual Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Le</namePart>
<namePart type="family">Qi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shangwen</namePart>
<namePart type="family">Lv</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hongyu</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jing</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yu</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qiaoqiao</namePart>
<namePart type="family">She</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hua</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haifeng</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ting</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2022</title>
</titleInfo>
<name type="personal">
<namePart type="given">Smaranda</namePart>
<namePart type="family">Muresan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Preslav</namePart>
<namePart type="family">Nakov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aline</namePart>
<namePart type="family">Villavicencio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Open-domain question answering has been used in a wide range of applications, such as web search and enterprise search, which usually takes clean texts extracted from various formats of documents (e.g., web pages, PDFs, or Word documents) as the information source. However, designing different text extraction approaches is time-consuming and not scalable. In order to reduce human cost and improve the scalability of QA systems, we propose and study an Open-domain Document Visual Question Answering (Open-domain DocVQA) task, which requires answering questions based on a collection of document images directly instead of only document texts, utilizing layouts and visual features additionally. Towards this end, we introduce the first Chinese Open-domain DocVQA dataset called DuReader_vis, containing about 15K question-answering pairs and 158K document images from the Baidu search engine. There are three main challenges in DuReader_vis: (1) long document understanding, (2) noisy texts, and (3) multi-span answer extraction. The extensive experiments demonstrate that the dataset is challenging. Additionally, we propose a simple approach that incorporates the layout and visual features, and the experimental results show the effectiveness of the proposed approach. The dataset and code will be publicly available at https://github.com/baidu/DuReader/tree/master/DuReader-vis.</abstract>
<identifier type="citekey">qi-etal-2022-dureadervis</identifier>
<identifier type="doi">10.18653/v1/2022.findings-acl.105</identifier>
<location>
<url>https://aclanthology.org/2022.findings-acl.105</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>1338</start>
<end>1351</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T DuReader_vis: A Chinese Dataset for Open-domain Document Visual Question Answering
%A Qi, Le
%A Lv, Shangwen
%A Li, Hongyu
%A Liu, Jing
%A Zhang, Yu
%A She, Qiaoqiao
%A Wu, Hua
%A Wang, Haifeng
%A Liu, Ting
%Y Muresan, Smaranda
%Y Nakov, Preslav
%Y Villavicencio, Aline
%S Findings of the Association for Computational Linguistics: ACL 2022
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F qi-etal-2022-dureadervis
%X Open-domain question answering has been used in a wide range of applications, such as web search and enterprise search, which usually takes clean texts extracted from various formats of documents (e.g., web pages, PDFs, or Word documents) as the information source. However, designing different text extraction approaches is time-consuming and not scalable. In order to reduce human cost and improve the scalability of QA systems, we propose and study an Open-domain Document Visual Question Answering (Open-domain DocVQA) task, which requires answering questions based on a collection of document images directly instead of only document texts, utilizing layouts and visual features additionally. Towards this end, we introduce the first Chinese Open-domain DocVQA dataset called DuReader_vis, containing about 15K question-answering pairs and 158K document images from the Baidu search engine. There are three main challenges in DuReader_vis: (1) long document understanding, (2) noisy texts, and (3) multi-span answer extraction. The extensive experiments demonstrate that the dataset is challenging. Additionally, we propose a simple approach that incorporates the layout and visual features, and the experimental results show the effectiveness of the proposed approach. The dataset and code will be publicly available at https://github.com/baidu/DuReader/tree/master/DuReader-vis.
%R 10.18653/v1/2022.findings-acl.105
%U https://aclanthology.org/2022.findings-acl.105
%U https://doi.org/10.18653/v1/2022.findings-acl.105
%P 1338-1351
Markdown (Informal)
[DuReadervis: A Chinese Dataset for Open-domain Document Visual Question Answering](https://aclanthology.org/2022.findings-acl.105) (Qi et al., Findings 2022)
ACL
- Le Qi, Shangwen Lv, Hongyu Li, Jing Liu, Yu Zhang, Qiaoqiao She, Hua Wu, Haifeng Wang, and Ting Liu. 2022. DuReader_vis: A Chinese Dataset for Open-domain Document Visual Question Answering. In Findings of the Association for Computational Linguistics: ACL 2022, pages 1338–1351, Dublin, Ireland. Association for Computational Linguistics.