@inproceedings{griffin-etal-2023-large,
title = "Large Language Models respond to Influence like Humans",
author = "Griffin, Lewis and
Kleinberg, Bennett and
Mozes, Maximilian and
Mai, Kimberly and
Vau, Maria Do Mar and
Caldwell, Matthew and
Mavor-Parker, Augustine",
editor = "Chawla, Kushal and
Shi, Weiyan",
booktitle = "Proceedings of the First Workshop on Social Influence in Conversations (SICon 2023)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.sicon-1.3",
doi = "10.18653/v1/2023.sicon-1.3",
pages = "15--24",
abstract = "Two studies tested the hypothesis that a Large Language Model (LLM) can be used to model psychological change following exposure to influential input. The first study tested a generic mode of influence - the Illusory Truth Effect (ITE) - where earlier exposure to a statement boosts a later truthfulness test rating. Analysis of newly collected data from human and LLM-simulated subjects (1000 of each) showed the same pattern of effects in both populations; although with greater per statement variability for the LLM. The second study concerns a specific mode of influence {--} populist framing of news to increase its persuasion and political mobilization. Newly collected data from simulated subjects was compared to previously published data from a 15 country experiment on 7286 human participants. Several effects from the human study were replicated by the simulated study, including ones that surprised the authors of the human study by contradicting their theoretical expectations; but some significant relationships found in human data were not present in the LLM data. Together the two studies support the view that LLMs have potential to act as models of the effect of influence.",
}
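The BibTeX record above can be consumed programmatically. Below is a minimal sketch using the third-party bibtexparser package (v1 API); the file name citation.bib is an assumption for illustration, not part of the record.

```python
# Minimal sketch: load the BibTeX record above with bibtexparser (v1 API).
# Assumes `pip install bibtexparser`; "citation.bib" is a hypothetical file name.
import bibtexparser

with open("citation.bib") as f:
    db = bibtexparser.load(f)  # returns a BibDatabase

entry = db.entries[0]   # each entry is a plain dict keyed by field name
print(entry["ID"])      # griffin-etal-2023-large
print(entry["title"])   # Large Language Models respond to Influence like Humans
print(entry["doi"])     # 10.18653/v1/2023.sicon-1.3
```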
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="griffin-etal-2023-large">
<titleInfo>
<title>Large Language Models respond to Influence like Humans</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lewis</namePart>
<namePart type="family">Griffin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bennett</namePart>
<namePart type="family">Kleinberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maximilian</namePart>
<namePart type="family">Mozes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kimberly</namePart>
<namePart type="family">Mai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maria</namePart>
<namePart type="given">Do</namePart>
<namePart type="given">Mar</namePart>
<namePart type="family">Vau</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Matthew</namePart>
<namePart type="family">Caldwell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Augustine</namePart>
<namePart type="family">Mavor-Parker</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Social Influence in Conversations (SICon 2023)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kushal</namePart>
<namePart type="family">Chawla</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Weiyan</namePart>
<namePart type="family">Shi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Toronto, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Two studies tested the hypothesis that a Large Language Model (LLM) can be used to model psychological change following exposure to influential input. The first study tested a generic mode of influence – the Illusory Truth Effect (ITE) – where earlier exposure to a statement boosts a later truthfulness test rating. Analysis of newly collected data from human and LLM-simulated subjects (1000 of each) showed the same pattern of effects in both populations, although with greater per-statement variability for the LLM. The second study concerns a specific mode of influence – populist framing of news to increase its persuasiveness and political mobilization. Newly collected data from simulated subjects was compared to previously published data from a 15-country experiment on 7286 human participants. Several effects from the human study were replicated by the simulated study, including ones that surprised the authors of the human study by contradicting their theoretical expectations; but some significant relationships found in human data were not present in the LLM data. Together, the two studies support the view that LLMs have potential to act as models of the effect of influence.</abstract>
<identifier type="citekey">griffin-etal-2023-large</identifier>
<identifier type="doi">10.18653/v1/2023.sicon-1.3</identifier>
<location>
<url>https://aclanthology.org/2023.sicon-1.3</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>15</start>
<end>24</end>
</extent>
</part>
</mods>
</modsCollection>
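The MODS record declares the namespace http://www.loc.gov/mods/v3, so queries against it must be namespace-qualified. A minimal sketch using only the Python standard library's xml.etree.ElementTree, assuming the record is saved as the hypothetical file citation.xml:

```python
# Minimal sketch: extract the title and author names from the MODS record above.
# "citation.xml" is a hypothetical file name for the XML shown on this page.
import xml.etree.ElementTree as ET

NS = {"m": "http://www.loc.gov/mods/v3"}  # namespace declared by the record

root = ET.parse("citation.xml").getroot()     # <modsCollection>
mods = root.find("m:mods", NS)                # the single <mods> record

title = mods.find("m:titleInfo/m:title", NS).text
# Direct <name> children of <mods> are the paper's authors; the editors sit
# under <relatedItem>, so the role check below is belt-and-braces.
authors = [
    " ".join(part.text for part in name.findall("m:namePart", NS))
    for name in mods.findall("m:name", NS)
    if name.find("m:role/m:roleTerm", NS).text == "author"
]
print(title)
print(authors)  # e.g. ['Lewis Griffin', 'Bennett Kleinberg', ...]
```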
%0 Conference Proceedings
%T Large Language Models respond to Influence like Humans
%A Griffin, Lewis
%A Kleinberg, Bennett
%A Mozes, Maximilian
%A Mai, Kimberly
%A Vau, Maria Do Mar
%A Caldwell, Matthew
%A Mavor-Parker, Augustine
%Y Chawla, Kushal
%Y Shi, Weiyan
%S Proceedings of the First Workshop on Social Influence in Conversations (SICon 2023)
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F griffin-etal-2023-large
%X Two studies tested the hypothesis that a Large Language Model (LLM) can be used to model psychological change following exposure to influential input. The first study tested a generic mode of influence – the Illusory Truth Effect (ITE) – where earlier exposure to a statement boosts a later truthfulness test rating. Analysis of newly collected data from human and LLM-simulated subjects (1000 of each) showed the same pattern of effects in both populations, although with greater per-statement variability for the LLM. The second study concerns a specific mode of influence – populist framing of news to increase its persuasiveness and political mobilization. Newly collected data from simulated subjects was compared to previously published data from a 15-country experiment on 7286 human participants. Several effects from the human study were replicated by the simulated study, including ones that surprised the authors of the human study by contradicting their theoretical expectations; but some significant relationships found in human data were not present in the LLM data. Together, the two studies support the view that LLMs have potential to act as models of the effect of influence.
%R 10.18653/v1/2023.sicon-1.3
%U https://aclanthology.org/2023.sicon-1.3
%U https://doi.org/10.18653/v1/2023.sicon-1.3
%P 15-24
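The %-tagged block above is the line-oriented Refer/EndNote format: each line carries a one-letter tag after %, and tags such as %A (author), %Y (editor), and %U (URL) may repeat. A minimal sketch of folding such a block into a dictionary; the variable refer_text and the set of repeatable tags are assumptions for illustration:

```python
# Minimal sketch: parse a %-tagged (Refer/EndNote) record like the one above.
# Repeatable tags (%A, %Y, %U here) accumulate into lists; others collapse
# to a single string. REPEATABLE is an assumption, not part of the format spec.
from collections import defaultdict

REPEATABLE = {"A", "Y", "U"}

def parse_refer(text: str) -> dict:
    record = defaultdict(list)
    for line in text.splitlines():
        if not line.startswith("%"):
            continue  # skip anything that is not a tagged line
        tag, _, value = line[1:].partition(" ")
        record[tag].append(value.strip())
    return {t: v if t in REPEATABLE else v[0] for t, v in record.items()}

# Usage, assuming refer_text holds the %-block above:
# rec = parse_refer(refer_text)
# rec["T"]  -> 'Large Language Models respond to Influence like Humans'
# rec["A"]  -> list of the seven author names
```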
Markdown (Informal)
[Large Language Models respond to Influence like Humans](https://aclanthology.org/2023.sicon-1.3) (Griffin et al., SICon 2023)
ACL
Lewis Griffin, Bennett Kleinberg, Maximilian Mozes, Kimberly Mai, Maria Do Mar Vau, Matthew Caldwell, and Augustine Mavor-Parker. 2023. Large Language Models respond to Influence like Humans. In Proceedings of the First Workshop on Social Influence in Conversations (SICon 2023), pages 15–24, Toronto, Canada. Association for Computational Linguistics.