BibTeX
@inproceedings{sarrouti-etal-2020-visual,
title = "Visual Question Generation from Radiology Images",
author = "Sarrouti, Mourad and
Ben Abacha, Asma and
Demner-Fushman, Dina",
editor = "Wang, Xin and
Thomason, Jesse and
Hu, Ronghang and
Chen, Xinlei and
Anderson, Peter and
Wu, Qi and
Celikyilmaz, Asli and
Baldridge, Jason and
Wang, William Yang",
booktitle = "Proceedings of the First Workshop on Advances in Language and Vision Research",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.alvr-1.3",
doi = "10.18653/v1/2020.alvr-1.3",
pages = "12--18",
abstract = "Visual Question Generation (VQG), the task of generating a question based on image contents, is an increasingly important area that combines natural language processing and computer vision. Although there are some recent works that have attempted to generate questions from images in the open domain, the task of VQG in the medical domain has not been explored so far. In this paper, we introduce an approach to generation of visual questions about radiology images called VQGR, i.e. an algorithm that is able to ask a question when shown an image. VQGR first generates new training data from the existing examples, based on contextual word embeddings and image augmentation techniques. It then uses the variational auto-encoders model to encode images into a latent space and decode natural language questions. Experimental automatic evaluations performed on the VQA-RAD dataset of clinical visual questions show that VQGR achieves good performances compared with the baseline system. The source code is available at \url{https://github.com/sarrouti/vqgr}.",
}
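The abstract's first step, generating new training data with contextual word embeddings and image augmentation, can be pictured concretely. The sketch below is an illustration under assumptions, not the paper's recipe: it uses a BERT fill-mask pipeline for contextual word substitution and standard torchvision transforms for images; `augment_question` and the particular transforms are hypothetical (the actual implementation is at https://github.com/sarrouti/vqgr).

```python
# Hypothetical sketch of VQGR-style training-data augmentation.
# Assumes a BERT fill-mask model for contextual word substitution and
# standard torchvision transforms for images; the exact recipe in the
# paper may differ (see https://github.com/sarrouti/vqgr).
from transformers import pipeline
from torchvision import transforms

fill_mask = pipeline("fill-mask", model="bert-base-uncased")

def augment_question(question: str, target_word: str, top_k: int = 3) -> list[str]:
    """Replace one word with contextually plausible alternatives."""
    masked = question.replace(target_word, fill_mask.tokenizer.mask_token, 1)
    candidates = fill_mask(masked, top_k=top_k)
    # Keep substitutions that differ from the original word.
    return [c["sequence"] for c in candidates
            if c["token_str"].strip() != target_word]

# Label-preserving image augmentation (flips, crops, mild jitter).
image_augment = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomResizedCrop(224, scale=(0.9, 1.0)),
    transforms.ColorJitter(brightness=0.1, contrast=0.1),
])

if __name__ == "__main__":
    print(augment_question("is there an abnormality in the lung?", "abnormality"))
    # image_augment(pil_image) would yield an additional training image.
```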
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sarrouti-etal-2020-visual">
<titleInfo>
<title>Visual Question Generation from Radiology Images</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mourad</namePart>
<namePart type="family">Sarrouti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Asma</namePart>
<namePart type="family">Ben Abacha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dina</namePart>
<namePart type="family">Demner-Fushman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Advances in Language and Vision Research</title>
</titleInfo>
<name type="personal">
<namePart type="given">Xin</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jesse</namePart>
<namePart type="family">Thomason</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ronghang</namePart>
<namePart type="family">Hu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xinlei</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peter</namePart>
<namePart type="family">Anderson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qi</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Asli</namePart>
<namePart type="family">Celikyilmaz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jason</namePart>
<namePart type="family">Baldridge</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">William</namePart>
<namePart type="given">Yang</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Visual Question Generation (VQG), the task of generating a question from the contents of an image, is an increasingly important area that combines natural language processing and computer vision. Although some recent work has attempted to generate questions from images in the open domain, VQG in the medical domain has not previously been explored. In this paper, we introduce VQGR, an approach to generating visual questions about radiology images, i.e., an algorithm that can ask a question when shown an image. VQGR first generates new training data from the existing examples, using contextual word embeddings and image augmentation techniques. It then uses a variational auto-encoder to encode images into a latent space and decode natural language questions. Automatic evaluations on the VQA-RAD dataset of clinical visual questions show that VQGR performs well compared with the baseline system. The source code is available at https://github.com/sarrouti/vqgr.</abstract>
<identifier type="citekey">sarrouti-etal-2020-visual</identifier>
<identifier type="doi">10.18653/v1/2020.alvr-1.3</identifier>
<location>
<url>https://aclanthology.org/2020.alvr-1.3</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>12</start>
<end>18</end>
</extent>
</part>
</mods>
</modsCollection>
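The second step described in the abstract, a variational auto-encoder that maps images into a latent space and decodes natural-language questions, is sketched below in PyTorch. The CNN encoder, LSTM decoder, and all layer sizes are illustrative assumptions, not the authors' architecture; consult the linked repository for the real model.

```python
# Minimal sketch of a VAE that encodes an image and decodes a question.
# Architecture choices and sizes are illustrative assumptions, not the
# VQGR implementation (see https://github.com/sarrouti/vqgr).
import torch
import torch.nn as nn

class ImageToQuestionVAE(nn.Module):
    def __init__(self, vocab_size: int, latent_dim: int = 128, hidden_dim: int = 256):
        super().__init__()
        # CNN encoder: image -> feature vector.
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 32, 4, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        )
        self.to_mu = nn.Linear(64, latent_dim)
        self.to_logvar = nn.Linear(64, latent_dim)
        # LSTM decoder: the latent code conditions question generation.
        self.embed = nn.Embedding(vocab_size, hidden_dim)
        self.init_h = nn.Linear(latent_dim, hidden_dim)
        self.lstm = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
        self.out = nn.Linear(hidden_dim, vocab_size)

    def forward(self, images, questions):
        feats = self.encoder(images)
        mu, logvar = self.to_mu(feats), self.to_logvar(feats)
        # Reparameterization trick: sample z while keeping gradients.
        z = mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)
        h0 = torch.tanh(self.init_h(z)).unsqueeze(0)
        c0 = torch.zeros_like(h0)
        dec, _ = self.lstm(self.embed(questions), (h0, c0))
        logits = self.out(dec)
        # KL term regularizes the latent space toward a standard normal.
        kl = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
        return logits, kl

if __name__ == "__main__":
    model = ImageToQuestionVAE(vocab_size=1000)
    imgs = torch.randn(2, 3, 224, 224)
    toks = torch.randint(0, 1000, (2, 12))
    logits, kl = model(imgs, toks)
    print(logits.shape, kl.item())  # torch.Size([2, 12, 1000])
```

Training would minimize token-level cross-entropy on the decoded question plus the KL term; at inference time a question can be generated by sampling z from the prior and decoding greedily.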
Endnote
%0 Conference Proceedings
%T Visual Question Generation from Radiology Images
%A Sarrouti, Mourad
%A Ben Abacha, Asma
%A Demner-Fushman, Dina
%Y Wang, Xin
%Y Thomason, Jesse
%Y Hu, Ronghang
%Y Chen, Xinlei
%Y Anderson, Peter
%Y Wu, Qi
%Y Celikyilmaz, Asli
%Y Baldridge, Jason
%Y Wang, William Yang
%S Proceedings of the First Workshop on Advances in Language and Vision Research
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F sarrouti-etal-2020-visual
%X Visual Question Generation (VQG), the task of generating a question from the contents of an image, is an increasingly important area that combines natural language processing and computer vision. Although some recent work has attempted to generate questions from images in the open domain, VQG in the medical domain has not previously been explored. In this paper, we introduce VQGR, an approach to generating visual questions about radiology images, i.e., an algorithm that can ask a question when shown an image. VQGR first generates new training data from the existing examples, using contextual word embeddings and image augmentation techniques. It then uses a variational auto-encoder to encode images into a latent space and decode natural language questions. Automatic evaluations on the VQA-RAD dataset of clinical visual questions show that VQGR performs well compared with the baseline system. The source code is available at https://github.com/sarrouti/vqgr.
%R 10.18653/v1/2020.alvr-1.3
%U https://aclanthology.org/2020.alvr-1.3
%U https://doi.org/10.18653/v1/2020.alvr-1.3
%P 12-18
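The record mentions automatic evaluations on VQA-RAD without naming the metrics in this export. BLEU is a common automatic metric for generation tasks, so the snippet below is a hedged sketch of sentence-level BLEU with NLTK, assumed here purely for illustration rather than taken from the paper.

```python
# Illustrative automatic evaluation of generated questions with BLEU.
# The record only says "automatic evaluations" on VQA-RAD; BLEU is a
# common choice for generation tasks, assumed here for illustration.
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

def bleu_score(reference: str, generated: str) -> float:
    """Sentence-level BLEU with smoothing for short questions."""
    smooth = SmoothingFunction().method1
    return sentence_bleu([reference.split()], generated.split(),
                         smoothing_function=smooth)

print(bleu_score("is there a fracture in the left rib?",
                 "is there a fracture in the rib?"))
```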
Markdown (Informal)
[Visual Question Generation from Radiology Images](https://aclanthology.org/2020.alvr-1.3) (Sarrouti et al., ALVR 2020)
ACL
Mourad Sarrouti, Asma Ben Abacha, and Dina Demner-Fushman. 2020. Visual Question Generation from Radiology Images. In Proceedings of the First Workshop on Advances in Language and Vision Research, pages 12–18, Online. Association for Computational Linguistics.