BibTeX
@inproceedings{komada-inui-2020-element,
    title = "An Element-wise Visual-enhanced {B}i{LSTM}-{CRF} Model for Location Name Recognition",
    author = "Komada, Takuya and
      Inui, Takashi",
    editor = "Kordjamshidi, Parisa and
      Bhatia, Archna and
      Alikhani, Malihe and
      Baldridge, Jason and
      Bansal, Mohit and
      Moens, Marie-Francine",
    booktitle = "Proceedings of the Third International Workshop on Spatial Language Understanding",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.splu-1.1",
    doi = "10.18653/v1/2020.splu-1.1",
    pages = "1--9",
abstract = "In recent years, previous studies have used visual information in named entity recognition (NER) for social media posts with attached images. However, these methods can only be applied to documents with attached images. In this paper, we propose a NER method that can use element-wise visual information for any documents by using image data corresponding to each word in the document. The proposed method obtains element-wise image data using an image retrieval engine, to be used as extra features in the neural NER model. Experimental results on the standard Japanese NER dataset show that the proposed method achieves a higher F1 value (89.67{\%}) than a baseline method, demonstrating the effectiveness of using element-wise visual information.",
}
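The abstract describes concatenating per-token visual features, obtained by retrieving an image for each word, with word representations in a BiLSTM-CRF tagger. As a rough illustration only, here is a minimal PyTorch sketch of that fusion step; the class name, dimensions, and the use of a plain linear emission layer in place of the paper's CRF decoder are assumptions for brevity, not the authors' implementation.

```python
# Minimal sketch (not the authors' code) of element-wise visual feature fusion
# in a BiLSTM tagger: each token's word embedding is concatenated with a
# per-token visual feature vector (e.g. pooled CNN features of a retrieved
# image). A linear emission layer stands in for the paper's CRF decoder.
import torch
import torch.nn as nn

class VisualBiLSTMTagger(nn.Module):
    def __init__(self, vocab_size, num_tags, word_dim=100, visual_dim=512, hidden_dim=200):
        super().__init__()
        self.word_emb = nn.Embedding(vocab_size, word_dim)
        # BiLSTM over the concatenated [word ; visual] vectors, one per token.
        self.bilstm = nn.LSTM(word_dim + visual_dim, hidden_dim,
                              batch_first=True, bidirectional=True)
        # Per-token tag scores; a CRF layer would normally sit on top of these.
        self.emissions = nn.Linear(2 * hidden_dim, num_tags)

    def forward(self, token_ids, visual_feats):
        # token_ids:    (batch, seq_len)             word indices
        # visual_feats: (batch, seq_len, visual_dim) per-token image features
        w = self.word_emb(token_ids)              # (batch, seq, word_dim)
        x = torch.cat([w, visual_feats], dim=-1)  # element-wise fusion
        h, _ = self.bilstm(x)                     # (batch, seq, 2*hidden)
        return self.emissions(h)                  # (batch, seq, num_tags)

# Usage with random inputs (illustrative only).
model = VisualBiLSTMTagger(vocab_size=30000, num_tags=9)
tokens = torch.randint(0, 30000, (2, 12))
visuals = torch.randn(2, 12, 512)
scores = model(tokens, visuals)  # argmax or CRF decoding would follow
```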
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="komada-inui-2020-element">
    <titleInfo>
      <title>An Element-wise Visual-enhanced BiLSTM-CRF Model for Location Name Recognition</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Takuya</namePart>
      <namePart type="family">Komada</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Takashi</namePart>
      <namePart type="family">Inui</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Third International Workshop on Spatial Language Understanding</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Parisa</namePart>
        <namePart type="family">Kordjamshidi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Archna</namePart>
        <namePart type="family">Bhatia</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Malihe</namePart>
        <namePart type="family">Alikhani</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jason</namePart>
        <namePart type="family">Baldridge</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohit</namePart>
        <namePart type="family">Bansal</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Marie-Francine</namePart>
        <namePart type="family">Moens</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In recent years, studies have used visual information in named entity recognition (NER) for social media posts with attached images. However, these methods can only be applied to documents with attached images. In this paper, we propose an NER method that can use element-wise visual information for any document by using image data corresponding to each word in the document. The proposed method obtains element-wise image data using an image retrieval engine and uses it as extra features in the neural NER model. Experimental results on the standard Japanese NER dataset show that the proposed method achieves a higher F1 score (89.67%) than a baseline method, demonstrating the effectiveness of using element-wise visual information.</abstract>
<identifier type="citekey">komada-inui-2020-element</identifier>
<identifier type="doi">10.18653/v1/2020.splu-1.1</identifier>
<location>
<url>https://aclanthology.org/2020.splu-1.1</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>1</start>
<end>9</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T An Element-wise Visual-enhanced BiLSTM-CRF Model for Location Name Recognition
%A Komada, Takuya
%A Inui, Takashi
%Y Kordjamshidi, Parisa
%Y Bhatia, Archna
%Y Alikhani, Malihe
%Y Baldridge, Jason
%Y Bansal, Mohit
%Y Moens, Marie-Francine
%S Proceedings of the Third International Workshop on Spatial Language Understanding
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F komada-inui-2020-element
%X In recent years, studies have used visual information in named entity recognition (NER) for social media posts with attached images. However, these methods can only be applied to documents with attached images. In this paper, we propose an NER method that can use element-wise visual information for any document by using image data corresponding to each word in the document. The proposed method obtains element-wise image data using an image retrieval engine and uses it as extra features in the neural NER model. Experimental results on the standard Japanese NER dataset show that the proposed method achieves a higher F1 score (89.67%) than a baseline method, demonstrating the effectiveness of using element-wise visual information.
%R 10.18653/v1/2020.splu-1.1
%U https://aclanthology.org/2020.splu-1.1
%U https://doi.org/10.18653/v1/2020.splu-1.1
%P 1-9
Markdown (Informal)
[An Element-wise Visual-enhanced BiLSTM-CRF Model for Location Name Recognition](https://aclanthology.org/2020.splu-1.1) (Komada & Inui, SpLU 2020)
ACL
Takuya Komada and Takashi Inui. 2020. An Element-wise Visual-enhanced BiLSTM-CRF Model for Location Name Recognition. In Proceedings of the Third International Workshop on Spatial Language Understanding, pages 1–9, Online. Association for Computational Linguistics.