@inproceedings{liu-etal-2023-lhs712ee,
title = "{LHS}712{EE} at {B}io{L}ay{S}umm 2023: Using {BART} and {LED} to summarize biomedical research articles",
author = "Liu, Quancheng and
Ren, Xiheng and
Vydiswaran, V.G.Vinod",
    editor = "Demner-Fushman, Dina and
Ananiadou, Sophia and
Cohen, Kevin",
booktitle = "The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.bionlp-1.66",
doi = "10.18653/v1/2023.bionlp-1.66",
pages = "620--624",
abstract = "As part of our participation in BioLaySumm 2023, we explored the use of large language models (LLMs) to automatically generate concise and readable summaries of biomedical research articles. We utilized pre-trained LLMs to fine-tune our summarization models on two provided datasets, and adapt them to the shared task within the constraints of training time and computational power. Our final models achieved very high relevance and factuality scores on the test set, and ranked among the top five models in the overall performance.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liu-etal-2023-lhs712ee">
<titleInfo>
<title>LHS712EE at BioLaySumm 2023: Using BART and LED to summarize biomedical research articles</title>
</titleInfo>
<name type="personal">
<namePart type="given">Quancheng</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiheng</namePart>
<namePart type="family">Ren</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">V.G.Vinod</namePart>
<namePart type="family">Vydiswaran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dina</namePart>
        <namePart type="family">Demner-Fushman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sophia</namePart>
<namePart type="family">Ananiadou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Cohen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Toronto, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>As part of our participation in BioLaySumm 2023, we explored the use of large language models (LLMs) to automatically generate concise and readable summaries of biomedical research articles. We utilized pre-trained LLMs to fine-tune our summarization models on two provided datasets, and adapt them to the shared task within the constraints of training time and computational power. Our final models achieved very high relevance and factuality scores on the test set, and ranked among the top five models in the overall performance.</abstract>
<identifier type="citekey">liu-etal-2023-lhs712ee</identifier>
<identifier type="doi">10.18653/v1/2023.bionlp-1.66</identifier>
<location>
<url>https://aclanthology.org/2023.bionlp-1.66</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>620</start>
<end>624</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T LHS712EE at BioLaySumm 2023: Using BART and LED to summarize biomedical research articles
%A Liu, Quancheng
%A Ren, Xiheng
%A Vydiswaran, V.G.Vinod
%Y Demner-Fushman, Dina
%Y Ananiadou, Sophia
%Y Cohen, Kevin
%S The 22nd Workshop on Biomedical Natural Language Processing and BioNLP Shared Tasks
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F liu-etal-2023-lhs712ee
%X As part of our participation in BioLaySumm 2023, we explored the use of large language models (LLMs) to automatically generate concise and readable summaries of biomedical research articles. We utilized pre-trained LLMs to fine-tune our summarization models on two provided datasets, and adapt them to the shared task within the constraints of training time and computational power. Our final models achieved very high relevance and factuality scores on the test set, and ranked among the top five models in the overall performance.
%R 10.18653/v1/2023.bionlp-1.66
%U https://aclanthology.org/2023.bionlp-1.66
%U https://doi.org/10.18653/v1/2023.bionlp-1.66
%P 620-624
Markdown (Informal)
[LHS712EE at BioLaySumm 2023: Using BART and LED to summarize biomedical research articles](https://aclanthology.org/2023.bionlp-1.66) (Liu et al., BioNLP 2023)
ACL