@inproceedings{masud-etal-2024-hate,
    title = "Hate Personified: Investigating the role of {LLM}s in content moderation",
    author = "Masud, Sarah and
      Singh, Sahajpreet and
      Hangya, Viktor and
      Fraser, Alexander and
      Chakraborty, Tanmoy",
    editor = "Al-Onaizan, Yaser and
      Bansal, Mohit and
      Chen, Yun-Nung",
    booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2024",
    address = "Miami, Florida, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.emnlp-main.886/",
    doi = "10.18653/v1/2024.emnlp-main.886",
    pages = "15847--15863",
    abstract = "For subjective tasks such as hate detection, where people perceive hate differently, the Large Language Model's (LLM) ability to represent diverse groups is unclear. By including additional context in prompts, we comprehensively analyze LLM's sensitivity to geographical priming, persona attributes, and numerical information to assess how well the needs of various groups are reflected. Our findings on two LLMs, five languages, and six datasets reveal that mimicking persona-based attributes leads to annotation variability. Meanwhile, incorporating geographical signals leads to better regional alignment. We also find that the LLMs are sensitive to numerical anchors, indicating the ability to leverage community-based flagging efforts and exposure to adversaries. Our work provides preliminary guidelines and highlights the nuances of applying LLMs in culturally sensitive cases."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="masud-etal-2024-hate">
    <titleInfo>
      <title>Hate Personified: Investigating the role of LLMs in content moderation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Sarah</namePart>
      <namePart type="family">Masud</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sahajpreet</namePart>
      <namePart type="family">Singh</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Viktor</namePart>
      <namePart type="family">Hangya</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Alexander</namePart>
      <namePart type="family">Fraser</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tanmoy</namePart>
      <namePart type="family">Chakraborty</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Yaser</namePart>
        <namePart type="family">Al-Onaizan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohit</namePart>
        <namePart type="family">Bansal</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yun-Nung</namePart>
        <namePart type="family">Chen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Miami, Florida, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>For subjective tasks such as hate detection, where people perceive hate differently, the Large Language Model's (LLM) ability to represent diverse groups is unclear. By including additional context in prompts, we comprehensively analyze LLM's sensitivity to geographical priming, persona attributes, and numerical information to assess how well the needs of various groups are reflected. Our findings on two LLMs, five languages, and six datasets reveal that mimicking persona-based attributes leads to annotation variability. Meanwhile, incorporating geographical signals leads to better regional alignment. We also find that the LLMs are sensitive to numerical anchors, indicating the ability to leverage community-based flagging efforts and exposure to adversaries. Our work provides preliminary guidelines and highlights the nuances of applying LLMs in culturally sensitive cases.</abstract>
    <identifier type="citekey">masud-etal-2024-hate</identifier>
    <identifier type="doi">10.18653/v1/2024.emnlp-main.886</identifier>
    <location>
      <url>https://aclanthology.org/2024.emnlp-main.886/</url>
    </location>
    <part>
      <date>2024-11</date>
      <extent unit="page">
        <start>15847</start>
        <end>15863</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Hate Personified: Investigating the role of LLMs in content moderation
%A Masud, Sarah
%A Singh, Sahajpreet
%A Hangya, Viktor
%A Fraser, Alexander
%A Chakraborty, Tanmoy
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F masud-etal-2024-hate
%X For subjective tasks such as hate detection, where people perceive hate differently, the Large Language Model's (LLM) ability to represent diverse groups is unclear. By including additional context in prompts, we comprehensively analyze LLM's sensitivity to geographical priming, persona attributes, and numerical information to assess how well the needs of various groups are reflected. Our findings on two LLMs, five languages, and six datasets reveal that mimicking persona-based attributes leads to annotation variability. Meanwhile, incorporating geographical signals leads to better regional alignment. We also find that the LLMs are sensitive to numerical anchors, indicating the ability to leverage community-based flagging efforts and exposure to adversaries. Our work provides preliminary guidelines and highlights the nuances of applying LLMs in culturally sensitive cases.
%R 10.18653/v1/2024.emnlp-main.886
%U https://aclanthology.org/2024.emnlp-main.886/
%U https://doi.org/10.18653/v1/2024.emnlp-main.886
%P 15847-15863
Markdown (Informal)
[Hate Personified: Investigating the role of LLMs in content moderation](https://aclanthology.org/2024.emnlp-main.886/) (Masud et al., EMNLP 2024)
ACL
Sarah Masud, Sahajpreet Singh, Viktor Hangya, Alexander Fraser, and Tanmoy Chakraborty. 2024. Hate Personified: Investigating the role of LLMs in content moderation. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15847–15863, Miami, Florida, USA. Association for Computational Linguistics.