@inproceedings{anderson-2024-prompting-assignment,
    title = "A Prompting Assignment for Exploring Pretrained {LLM}s",
    author = "Anderson, Carolyn",
    editor = {Al-azzawi, Sana and
      Biester, Laura and
      Kov{\'a}cs, Gy{\"o}rgy and
      Marasovi{\'c}, Ana and
      Mathur, Leena and
      Mieskes, Margot and
      Weissweiler, Leonie},
    booktitle = "Proceedings of the Sixth Workshop on Teaching NLP",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.teachingnlp-1.12",
    pages = "81--84",
    abstract = "As the scale of publicly-available large language models (LLMs) has increased, so has interest in few-shot prompting methods. This paper presents an assignment that asks students to explore three aspects of large language model capabilities (commonsense reasoning, factuality, and wordplay) with a prompt engineering focus. The assignment consists of three tasks designed to share a common programming framework, so that students can reuse and adapt code from earlier tasks. Two of the tasks also involve dataset construction: students are asked to construct a simple dataset for the wordplay task, and a more challenging dataset for the factuality task. In addition, the assignment includes reflection questions that ask students to think critically about what they observe.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="anderson-2024-prompting-assignment">
    <titleInfo>
      <title>A Prompting Assignment for Exploring Pretrained LLMs</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Carolyn</namePart>
      <namePart type="family">Anderson</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Sixth Workshop on Teaching NLP</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Sana</namePart>
        <namePart type="family">Al-azzawi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Laura</namePart>
        <namePart type="family">Biester</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">György</namePart>
        <namePart type="family">Kovács</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ana</namePart>
        <namePart type="family">Marasović</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Leena</namePart>
        <namePart type="family">Mathur</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Margot</namePart>
        <namePart type="family">Mieskes</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Leonie</namePart>
        <namePart type="family">Weissweiler</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Bangkok, Thailand</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>As the scale of publicly-available large language models (LLMs) has increased, so has interest in few-shot prompting methods. This paper presents an assignment that asks students to explore three aspects of large language model capabilities (commonsense reasoning, factuality, and wordplay) with a prompt engineering focus. The assignment consists of three tasks designed to share a common programming framework, so that students can reuse and adapt code from earlier tasks. Two of the tasks also involve dataset construction: students are asked to construct a simple dataset for the wordplay task, and a more challenging dataset for the factuality task. In addition, the assignment includes reflection questions that ask students to think critically about what they observe.</abstract>
    <identifier type="citekey">anderson-2024-prompting-assignment</identifier>
    <location>
      <url>https://aclanthology.org/2024.teachingnlp-1.12</url>
    </location>
    <part>
      <date>2024-08</date>
      <extent unit="page">
        <start>81</start>
        <end>84</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T A Prompting Assignment for Exploring Pretrained LLMs
%A Anderson, Carolyn
%Y Al-azzawi, Sana
%Y Biester, Laura
%Y Kovács, György
%Y Marasović, Ana
%Y Mathur, Leena
%Y Mieskes, Margot
%Y Weissweiler, Leonie
%S Proceedings of the Sixth Workshop on Teaching NLP
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F anderson-2024-prompting-assignment
%X As the scale of publicly-available large language models (LLMs) has increased, so has interest in few-shot prompting methods. This paper presents an assignment that asks students to explore three aspects of large language model capabilities (commonsense reasoning, factuality, and wordplay) with a prompt engineering focus. The assignment consists of three tasks designed to share a common programming framework, so that students can reuse and adapt code from earlier tasks. Two of the tasks also involve dataset construction: students are asked to construct a simple dataset for the wordplay task, and a more challenging dataset for the factuality task. In addition, the assignment includes reflection questions that ask students to think critically about what they observe.
%U https://aclanthology.org/2024.teachingnlp-1.12
%P 81-84
Markdown (Informal)
[A Prompting Assignment for Exploring Pretrained LLMs](https://aclanthology.org/2024.teachingnlp-1.12) (Anderson, TeachingNLP-WS 2024)
ACL
Carolyn Anderson. 2024. A Prompting Assignment for Exploring Pretrained LLMs. In Proceedings of the Sixth Workshop on Teaching NLP, pages 81–84, Bangkok, Thailand. Association for Computational Linguistics.