@inproceedings{qasemizadeh-etal-2017-projection,
title = "Projection Al{\'e}atoire Non-N{\'e}gative pour le Calcul de Word Embedding / Non-Negative Randomized Word Embedding",
author = "Qasemizadeh, Behrang and
Kallmeyer, Laura and
Herbelot, Aurelie",
editor = "Eshkol-Taravella, Iris and
Antoine, Jean-Yves",
booktitle = "Actes des 24{\`e}me Conf{\'e}rence sur le Traitement Automatique des Langues Naturelles. Volume 1 - Articles longs",
month = "6",
year = "2017",
address = "Orl{\'e}ans, France",
publisher = "ATALA",
url = "https://aclanthology.org/2017.jeptalnrecital-long.8/",
pages = "109--122",
abstract = "Non-Negative Randomized Word Embedding We propose a word embedding method which is based on a novel random projection technique. We show that weighting methods such as positive pointwise mutual information (PPMI) can be applied to our models after their construction and at a reduced dimensionality. Hence, the proposed technique can efficiently transfer words onto semantically discriminative spaces while demonstrating high computational performance, besides benefits such as ease of update and a simple mechanism for interoperability. We report the performance of our method on several tasks and show that it yields competitive results compared to neural embedding methods in monolingual corpus-based setups."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="qasemizadeh-etal-2017-projection">
<titleInfo>
<title>Projection Aléatoire Non-Négative pour le Calcul de Word Embedding / Non-Negative Randomized Word Embedding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Behrang</namePart>
<namePart type="family">Qasemizadeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Laura</namePart>
<namePart type="family">Kallmeyer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aurelie</namePart>
<namePart type="family">Herbelot</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Actes des 24ème Conférence sur le Traitement Automatique des Langues Naturelles. Volume 1 - Articles longs</title>
</titleInfo>
<name type="personal">
<namePart type="given">Iris</namePart>
<namePart type="family">Eshkol-Taravella</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jean-Yves</namePart>
<namePart type="family">Antoine</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>ATALA</publisher>
<place>
<placeTerm type="text">Orléans, France</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We propose a word embedding method which is based on a novel random projection technique. We show that weighting methods such as positive pointwise mutual information (PPMI) can be applied to our models after their construction and at a reduced dimensionality. Hence, the proposed technique can efficiently transfer words onto semantically discriminative spaces while demonstrating high computational performance, besides benefits such as ease of update and a simple mechanism for interoperability. We report the performance of our method on several tasks and show that it yields competitive results compared to neural embedding methods in monolingual corpus-based setups.</abstract>
<identifier type="citekey">qasemizadeh-etal-2017-projection</identifier>
<location>
<url>https://aclanthology.org/2017.jeptalnrecital-long.8/</url>
</location>
<part>
<date>2017-06</date>
<extent unit="page">
<start>109</start>
<end>122</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Projection Aléatoire Non-Négative pour le Calcul de Word Embedding / Non-Negative Randomized Word Embedding
%A Qasemizadeh, Behrang
%A Kallmeyer, Laura
%A Herbelot, Aurelie
%Y Eshkol-Taravella, Iris
%Y Antoine, Jean-Yves
%S Actes des 24ème Conférence sur le Traitement Automatique des Langues Naturelles. Volume 1 - Articles longs
%D 2017
%8 June
%I ATALA
%C Orléans, France
%F qasemizadeh-etal-2017-projection
%X We propose a word embedding method which is based on a novel random projection technique. We show that weighting methods such as positive pointwise mutual information (PPMI) can be applied to our models after their construction and at a reduced dimensionality. Hence, the proposed technique can efficiently transfer words onto semantically discriminative spaces while demonstrating high computational performance, besides benefits such as ease of update and a simple mechanism for interoperability. We report the performance of our method on several tasks and show that it yields competitive results compared to neural embedding methods in monolingual corpus-based setups.
%U https://aclanthology.org/2017.jeptalnrecital-long.8/
%P 109-122
Markdown (Informal)
[Projection Aléatoire Non-Négative pour le Calcul de Word Embedding / Non-Negative Randomized Word Embedding](https://aclanthology.org/2017.jeptalnrecital-long.8/) (Qasemizadeh et al., JEP/TALN/RECITAL 2017)
ACL
Behrang Qasemizadeh, Laura Kallmeyer, and Aurelie Herbelot. 2017. Projection Aléatoire Non-Négative pour le Calcul de Word Embedding / Non-Negative Randomized Word Embedding. In Actes des 24ème Conférence sur le Traitement Automatique des Langues Naturelles. Volume 1 - Articles longs, pages 109–122, Orléans, France. ATALA.
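
The abstract above sketches the general recipe: accumulate word/context co-occurrence counts through a non-negative random projection, then apply PPMI weighting directly in the reduced-dimensional space. The Python sketch below illustrates that general idea only, under assumptions of my own (the sparse 0/1 index vectors, the `random_index` and `build_embeddings` names, and the `dim`, `nnz`, and `window` defaults are all illustrative choices); it is not the authors' published algorithm.

```python
# Minimal sketch of non-negative randomized word embedding with post-hoc PPMI.
# All names and parameter values are assumptions for illustration, not the
# paper's exact method.
import zlib
from collections import defaultdict
import numpy as np

def random_index(word, dim=300, nnz=4):
    """Map a context word to a fixed sparse, non-negative index vector
    (hypothetical construction; the paper's projection may differ)."""
    rng = np.random.default_rng(zlib.crc32(word.encode("utf-8")))
    vec = np.zeros(dim)
    vec[rng.choice(dim, size=nnz, replace=False)] = 1.0
    return vec

def build_embeddings(corpus, window=2, dim=300):
    """Accumulate projected co-occurrence counts, then weight with PPMI."""
    vecs = defaultdict(lambda: np.zeros(dim))
    for sent in corpus:
        for i, word in enumerate(sent):
            lo, hi = max(0, i - window), min(len(sent), i + window + 1)
            for j in range(lo, hi):
                if j != i:
                    # counts stay non-negative because index vectors are 0/1
                    vecs[word] += random_index(sent[j], dim)
    words = sorted(vecs)
    counts = np.vstack([vecs[w] for w in words])
    # PPMI is applied after construction, directly at the reduced dimensionality.
    total = counts.sum()
    p_row = counts.sum(axis=1, keepdims=True) / total
    p_col = counts.sum(axis=0, keepdims=True) / total
    with np.errstate(divide="ignore", invalid="ignore"):
        pmi = np.log((counts / total) / (p_row * p_col))
    ppmi = np.where(np.isfinite(pmi), np.maximum(pmi, 0.0), 0.0)
    return words, ppmi

if __name__ == "__main__":
    toy_corpus = [["the", "cat", "sat", "on", "the", "mat"],
                  ["the", "dog", "sat", "on", "the", "rug"]]
    words, embeddings = build_embeddings(toy_corpus, dim=50)
    print(words, embeddings.shape)
```

Because the accumulated counts remain non-negative, PPMI (or a similar weighting) can be computed after the model is built and at the reduced dimensionality, which is the property the abstract highlights.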