@inproceedings{urbizu-etal-2019-deep,
    title = "Deep Cross-Lingual Coreference Resolution for Less-Resourced Languages: The Case of {B}asque",
    author = "Urbizu, Gorka and
      Soraluze, Ander and
      Arregi, Olatz",
    editor = "Ogrodniczuk, Maciej and
      Pradhan, Sameer and
      Grishina, Yulia and
      Ng, Vincent",
    booktitle = "Proceedings of the Second Workshop on Computational Models of Reference, Anaphora and Coreference",
    month = jun,
    year = "2019",
    address = "Minneapolis, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-2806",
    doi = "10.18653/v1/W19-2806",
    pages = "35--41",
    abstract = "In this paper, we present a cross-lingual neural coreference resolution system for a less-resourced language such as Basque. To begin with, we build the first neural coreference resolution system for Basque, training it with the relatively small EPEC-KORREF corpus (45,000 words). Next, a cross-lingual coreference resolution system is designed. With this approach, the system learns from a bigger English corpus, using cross-lingual embeddings, to perform the coreference resolution for Basque. The cross-lingual system obtains slightly better results (40.93 F1 CoNLL) than the monolingual system (39.12 F1 CoNLL), without using any Basque language corpus to train it.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="urbizu-etal-2019-deep">
    <titleInfo>
      <title>Deep Cross-Lingual Coreference Resolution for Less-Resourced Languages: The Case of Basque</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Gorka</namePart>
      <namePart type="family">Urbizu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ander</namePart>
      <namePart type="family">Soraluze</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Olatz</namePart>
      <namePart type="family">Arregi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Second Workshop on Computational Models of Reference, Anaphora and Coreference</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Maciej</namePart>
        <namePart type="family">Ogrodniczuk</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sameer</namePart>
        <namePart type="family">Pradhan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yulia</namePart>
        <namePart type="family">Grishina</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vincent</namePart>
        <namePart type="family">Ng</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Minneapolis, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In this paper, we present a cross-lingual neural coreference resolution system for a less-resourced language such as Basque. To begin with, we build the first neural coreference resolution system for Basque, training it with the relatively small EPEC-KORREF corpus (45,000 words). Next, a cross-lingual coreference resolution system is designed. With this approach, the system learns from a bigger English corpus, using cross-lingual embeddings, to perform the coreference resolution for Basque. The cross-lingual system obtains slightly better results (40.93 F1 CoNLL) than the monolingual system (39.12 F1 CoNLL), without using any Basque language corpus to train it.</abstract>
    <identifier type="citekey">urbizu-etal-2019-deep</identifier>
    <identifier type="doi">10.18653/v1/W19-2806</identifier>
    <location>
      <url>https://aclanthology.org/W19-2806</url>
    </location>
    <part>
      <date>2019-06</date>
      <extent unit="page">
        <start>35</start>
        <end>41</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Deep Cross-Lingual Coreference Resolution for Less-Resourced Languages: The Case of Basque
%A Urbizu, Gorka
%A Soraluze, Ander
%A Arregi, Olatz
%Y Ogrodniczuk, Maciej
%Y Pradhan, Sameer
%Y Grishina, Yulia
%Y Ng, Vincent
%S Proceedings of the Second Workshop on Computational Models of Reference, Anaphora and Coreference
%D 2019
%8 June
%I Association for Computational Linguistics
%C Minneapolis, USA
%F urbizu-etal-2019-deep
%X In this paper, we present a cross-lingual neural coreference resolution system for a less-resourced language such as Basque. To begin with, we build the first neural coreference resolution system for Basque, training it with the relatively small EPEC-KORREF corpus (45,000 words). Next, a cross-lingual coreference resolution system is designed. With this approach, the system learns from a bigger English corpus, using cross-lingual embeddings, to perform the coreference resolution for Basque. The cross-lingual system obtains slightly better results (40.93 F1 CoNLL) than the monolingual system (39.12 F1 CoNLL), without using any Basque language corpus to train it.
%R 10.18653/v1/W19-2806
%U https://aclanthology.org/W19-2806
%U https://doi.org/10.18653/v1/W19-2806
%P 35-41
Markdown (Informal)
[Deep Cross-Lingual Coreference Resolution for Less-Resourced Languages: The Case of Basque](https://aclanthology.org/W19-2806) (Urbizu et al., CRAC 2019)
ACL
Gorka Urbizu, Ander Soraluze, and Olatz Arregi. 2019. Deep Cross-Lingual Coreference Resolution for Less-Resourced Languages: The Case of Basque. In Proceedings of the Second Workshop on Computational Models of Reference, Anaphora and Coreference, pages 35–41, Minneapolis, USA. Association for Computational Linguistics.