@inproceedings{breitwieser-2022-contextualizing,
title = "Can Contextualizing User Embeddings Improve Sarcasm and Hate Speech Detection?",
author = "Breitwieser, Kim",
editor = "Bamman, David and
Hovy, Dirk and
Jurgens, David and
Keith, Katherine and
O’Connor, Brendan and
Volkova, Svitlana",
booktitle = "Proceedings of the Fifth Workshop on Natural Language Processing and Computational Social Science (NLP+CSS)",
month = nov,
year = "2022",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.nlpcss-1.14/",
doi = "10.18653/v1/2022.nlpcss-1.14",
pages = "126--139",
abstract = "While implicit embeddings so far have been mostly concerned with creating an overall representation of the user, we evaluate a different approach. By only considering content directed at a specific topic, we create sub-user embeddings, and measure their usefulness on the tasks of sarcasm and hate speech detection. In doing so, we show that task-related topics can have a noticeable effect on model performance, especially when dealing with intended expressions like sarcasm, but less so for hate speech, which is usually labelled as such on the receiving end."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="breitwieser-2022-contextualizing">
<titleInfo>
<title>Can Contextualizing User Embeddings Improve Sarcasm and Hate Speech Detection?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kim</namePart>
<namePart type="family">Breitwieser</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fifth Workshop on Natural Language Processing and Computational Social Science (NLP+CSS)</title>
</titleInfo>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Bamman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dirk</namePart>
<namePart type="family">Hovy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Jurgens</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Katherine</namePart>
<namePart type="family">Keith</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Brendan</namePart>
<namePart type="family">O’Connor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Svitlana</namePart>
<namePart type="family">Volkova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, UAE</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>While implicit embeddings so far have been mostly concerned with creating an overall representation of the user, we evaluate a different approach. By only considering content directed at a specific topic, we create sub-user embeddings, and measure their usefulness on the tasks of sarcasm and hate speech detection. In doing so, we show that task-related topics can have a noticeable effect on model performance, especially when dealing with intended expressions like sarcasm, but less so for hate speech, which is usually labelled as such on the receiving end.</abstract>
<identifier type="citekey">breitwieser-2022-contextualizing</identifier>
<identifier type="doi">10.18653/v1/2022.nlpcss-1.14</identifier>
<location>
<url>https://aclanthology.org/2022.nlpcss-1.14/</url>
</location>
<part>
<date>2022-11</date>
<extent unit="page">
<start>126</start>
<end>139</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Can Contextualizing User Embeddings Improve Sarcasm and Hate Speech Detection?
%A Breitwieser, Kim
%Y Bamman, David
%Y Hovy, Dirk
%Y Jurgens, David
%Y Keith, Katherine
%Y O’Connor, Brendan
%Y Volkova, Svitlana
%S Proceedings of the Fifth Workshop on Natural Language Processing and Computational Social Science (NLP+CSS)
%D 2022
%8 November
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F breitwieser-2022-contextualizing
%X While implicit embeddings so far have been mostly concerned with creating an overall representation of the user, we evaluate a different approach. By only considering content directed at a specific topic, we create sub-user embeddings, and measure their usefulness on the tasks of sarcasm and hate speech detection. In doing so, we show that task-related topics can have a noticeable effect on model performance, especially when dealing with intended expressions like sarcasm, but less so for hate speech, which is usually labelled as such on the receiving end.
%R 10.18653/v1/2022.nlpcss-1.14
%U https://aclanthology.org/2022.nlpcss-1.14/
%U https://doi.org/10.18653/v1/2022.nlpcss-1.14
%P 126-139
Markdown (Informal)
[Can Contextualizing User Embeddings Improve Sarcasm and Hate Speech Detection?](https://aclanthology.org/2022.nlpcss-1.14/) (Breitwieser, NLP+CSS 2022)
ACL