@inproceedings{villmow-etal-2021-contest,
    title = "{C}on{T}est: A Unit Test Completion Benchmark featuring Context",
    author = "Villmow, Johannes and
      Depoix, Jonas and
      Ulges, Adrian",
    editor = "Lachmy, Royi and
      Yao, Ziyu and
      Durrett, Greg and
      Gligoric, Milos and
      Li, Junyi Jessy and
      Mooney, Ray and
      Neubig, Graham and
      Su, Yu and
      Sun, Huan and
      Tsarfaty, Reut",
    booktitle = "Proceedings of the 1st Workshop on Natural Language Processing for Programming (NLP4Prog 2021)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.nlp4prog-1.2/",
    doi = "10.18653/v1/2021.nlp4prog-1.2",
    pages = "17--25",
    abstract = "We introduce CONTEST, a benchmark for NLP-based unit test completion, the task of predicting a test's assert statements given its setup and focal method, i.e. the method to be tested. ConTest is large-scale (with 365k datapoints). Besides the test code and tested code, it also features context code called by either. We found context to be crucial for accurately predicting assertions. We also introduce baselines based on transformer encoder-decoders, and study the effects of including syntactic information and context. Overall, our models achieve a BLEU score of 38.2, while only generating unparsable code in 1.92{\%} of cases."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="villmow-etal-2021-contest">
    <titleInfo>
      <title>ConTest: A Unit Test Completion Benchmark featuring Context</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Johannes</namePart>
      <namePart type="family">Villmow</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jonas</namePart>
      <namePart type="family">Depoix</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Adrian</namePart>
      <namePart type="family">Ulges</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 1st Workshop on Natural Language Processing for Programming (NLP4Prog 2021)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Royi</namePart>
        <namePart type="family">Lachmy</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ziyu</namePart>
        <namePart type="family">Yao</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Greg</namePart>
        <namePart type="family">Durrett</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Milos</namePart>
        <namePart type="family">Gligoric</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Junyi</namePart>
        <namePart type="given">Jessy</namePart>
        <namePart type="family">Li</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ray</namePart>
        <namePart type="family">Mooney</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Graham</namePart>
        <namePart type="family">Neubig</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yu</namePart>
        <namePart type="family">Su</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Huan</namePart>
        <namePart type="family">Sun</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Reut</namePart>
        <namePart type="family">Tsarfaty</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We introduce CONTEST, a benchmark for NLP-based unit test completion, the task of predicting a test's assert statements given its setup and focal method, i.e. the method to be tested. ConTest is large-scale (with 365k datapoints). Besides the test code and tested code, it also features context code called by either. We found context to be crucial for accurately predicting assertions. We also introduce baselines based on transformer encoder-decoders, and study the effects of including syntactic information and context. Overall, our models achieve a BLEU score of 38.2, while only generating unparsable code in 1.92% of cases.</abstract>
    <identifier type="citekey">villmow-etal-2021-contest</identifier>
    <identifier type="doi">10.18653/v1/2021.nlp4prog-1.2</identifier>
    <location>
      <url>https://aclanthology.org/2021.nlp4prog-1.2/</url>
    </location>
    <part>
      <date>2021-08</date>
      <extent unit="page">
        <start>17</start>
        <end>25</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T ConTest: A Unit Test Completion Benchmark featuring Context
%A Villmow, Johannes
%A Depoix, Jonas
%A Ulges, Adrian
%Y Lachmy, Royi
%Y Yao, Ziyu
%Y Durrett, Greg
%Y Gligoric, Milos
%Y Li, Junyi Jessy
%Y Mooney, Ray
%Y Neubig, Graham
%Y Su, Yu
%Y Sun, Huan
%Y Tsarfaty, Reut
%S Proceedings of the 1st Workshop on Natural Language Processing for Programming (NLP4Prog 2021)
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F villmow-etal-2021-contest
%X We introduce CONTEST, a benchmark for NLP-based unit test completion, the task of predicting a test's assert statements given its setup and focal method, i.e. the method to be tested. ConTest is large-scale (with 365k datapoints). Besides the test code and tested code, it also features context code called by either. We found context to be crucial for accurately predicting assertions. We also introduce baselines based on transformer encoder-decoders, and study the effects of including syntactic information and context. Overall, our models achieve a BLEU score of 38.2, while only generating unparsable code in 1.92% of cases.
%R 10.18653/v1/2021.nlp4prog-1.2
%U https://aclanthology.org/2021.nlp4prog-1.2/
%U https://doi.org/10.18653/v1/2021.nlp4prog-1.2
%P 17-25
Markdown (Informal)
[ConTest: A Unit Test Completion Benchmark featuring Context](https://aclanthology.org/2021.nlp4prog-1.2/) (Villmow et al., NLP4Prog 2021)
ACL
Johannes Villmow, Jonas Depoix, and Adrian Ulges. 2021. ConTest: A Unit Test Completion Benchmark featuring Context. In Proceedings of the 1st Workshop on Natural Language Processing for Programming (NLP4Prog 2021), pages 17–25, Online. Association for Computational Linguistics.