@inproceedings{baez-saggion-2023-lsllama,
title = "{LSL}lama: Fine-Tuned {LL}a{MA} for Lexical Simplification",
author = "Baez, Anthony and
Saggion, Horacio",
editor = "{\v{S}}tajner, Sanja and
Saggion, Horacio and
Shardlow, Matthew and
Alva-Manchego, Fernando",
booktitle = "Proceedings of the Second Workshop on Text Simplification, Accessibility and Readability",
month = sep,
year = "2023",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd., Shoumen, Bulgaria",
url = "https://aclanthology.org/2023.tsar-1.10/",
pages = "102--108",
abstract = "Generative Large Language Models (LLMs), such as GPT-3, have become increasingly effective and versatile in natural language processing (NLP) tasks. One such task is Lexical Simplification, where state-of-the-art methods involve complex, multi-step processes which can use both deep learning and non-deep learning processes. LLaMA, an LLM with full research access, holds unique potential for the adaption of the entire LS pipeline. This paper details the process of fine-tuning LLaMA to create LSLlama, which performs comparably to previous LS baseline models LSBert and UniHD."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="baez-saggion-2023-lsllama">
<titleInfo>
<title>LSLlama: Fine-Tuned LLaMA for Lexical Simplification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anthony</namePart>
<namePart type="family">Baez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Horacio</namePart>
<namePart type="family">Saggion</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Text Simplification, Accessibility and Readability</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sanja</namePart>
<namePart type="family">Štajner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Horacio</namePart>
<namePart type="family">Saggio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Matthew</namePart>
<namePart type="family">Shardlow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fernando</namePart>
<namePart type="family">Alva-Manchego</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd., Shoumen, Bulgaria</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Generative Large Language Models (LLMs), such as GPT-3, have become increasingly effective and versatile in natural language processing (NLP) tasks. One such task is Lexical Simplification, where state-of-the-art methods involve complex, multi-step processes which can use both deep learning and non-deep learning processes. LLaMA, an LLM with full research access, holds unique potential for the adaption of the entire LS pipeline. This paper details the process of fine-tuning LLaMA to create LSLlama, which performs comparably to previous LS baseline models LSBert and UniHD.</abstract>
<identifier type="citekey">baez-saggion-2023-lsllama</identifier>
<location>
<url>https://aclanthology.org/2023.tsar-1.10/</url>
</location>
<part>
<date>2023-09</date>
<extent unit="page">
<start>102</start>
<end>108</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T LSLlama: Fine-Tuned LLaMA for Lexical Simplification
%A Baez, Anthony
%A Saggion, Horacio
%Y Štajner, Sanja
%Y Saggion, Horacio
%Y Shardlow, Matthew
%Y Alva-Manchego, Fernando
%S Proceedings of the Second Workshop on Text Simplification, Accessibility and Readability
%D 2023
%8 September
%I INCOMA Ltd., Shoumen, Bulgaria
%C Varna, Bulgaria
%F baez-saggion-2023-lsllama
%X Generative Large Language Models (LLMs), such as GPT-3, have become increasingly effective and versatile in natural language processing (NLP) tasks. One such task is Lexical Simplification, where state-of-the-art methods involve complex, multi-step processes which can use both deep learning and non-deep learning processes. LLaMA, an LLM with full research access, holds unique potential for the adaption of the entire LS pipeline. This paper details the process of fine-tuning LLaMA to create LSLlama, which performs comparably to previous LS baseline models LSBert and UniHD.
%U https://aclanthology.org/2023.tsar-1.10/
%P 102-108
Markdown (Informal)
[LSLlama: Fine-Tuned LLaMA for Lexical Simplification](https://aclanthology.org/2023.tsar-1.10/) (Baez & Saggion, TSAR 2023)
ACL
Anthony Baez and Horacio Saggion. 2023. LSLlama: Fine-Tuned LLaMA for Lexical Simplification. In Proceedings of the Second Workshop on Text Simplification, Accessibility and Readability, pages 102–108, Varna, Bulgaria. INCOMA Ltd., Shoumen, Bulgaria.