@inproceedings{reeder-2001-one,
title = "In one hundred words or less",
author = "Reeder, Florence",
editor = "Hovy, Eduard and
King, Margaret and
Manzi, Sandra and
Reeder, Florence",
booktitle = "Workshop on MT Evaluation",
month = sep # " 18-22",
year = "2001",
address = "Santiago de Compostela, Spain",
url = "https://aclanthology.org/2001.mtsummit-eval.7/",
abstract = "This paper reports on research that aims to test the efficacy of applying automated evaluation techniques, originally designed for human second language learners, to machine translation (MT) system evaluation. We believe that such evaluation techniques will provide insight into MT evaluation, MT development, the human translation process and the human language learning process. The experiment described here looks only at the intelligibility of MT output. The evaluation technique is derived from a second language acquisition experiment that showed that assessors can differentiate native from non-native language essays in less than 100 words. Particularly illuminating for our purposes is the set of factors on which the assessors made their decisions. We duplicated this experiment to see whether similar criteria could be elicited by repeating the test with both human and machine translation outputs in the decision set. The encouraging results of this experiment, along with an analysis of the language factors contributing to the successful outcomes, are presented here."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="reeder-2001-one">
<titleInfo>
<title>In one hundred words or less</title>
</titleInfo>
<name type="personal">
<namePart type="given">Florence</namePart>
<namePart type="family">Reeder</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>September 18-22, 2001</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Workshop on MT Evaluation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Eduard</namePart>
<namePart type="family">Hovy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Margaret</namePart>
<namePart type="family">King</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sandra</namePart>
<namePart type="family">Manzi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Florence</namePart>
<namePart type="family">Reeder</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<place>
<placeTerm type="text">Santiago de Compostela, Spain</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper reports on research that aims to test the efficacy of applying automated evaluation techniques, originally designed for human second language learners, to machine translation (MT) system evaluation. We believe that such evaluation techniques will provide insight into MT evaluation, MT development, the human translation process and the human language learning process. The experiment described here looks only at the intelligibility of MT output. The evaluation technique is derived from a second language acquisition experiment that showed that assessors can differentiate native from non-native language essays in less than 100 words. Particularly illuminating for our purposes is the set of factors on which the assessors made their decisions. We duplicated this experiment to see whether similar criteria could be elicited by repeating the test with both human and machine translation outputs in the decision set. The encouraging results of this experiment, along with an analysis of the language factors contributing to the successful outcomes, are presented here.</abstract>
<identifier type="citekey">reeder-2001-one</identifier>
<location>
<url>https://aclanthology.org/2001.mtsummit-eval.7/</url>
</location>
<part>
<date>September 18-22, 2001</date>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T In one hundred words or less
%A Reeder, Florence
%Y Hovy, Eduard
%Y King, Margaret
%Y Manzi, Sandra
%Y Reeder, Florence
%S Workshop on MT Evaluation
%D 2001
%8 September 18-22
%C Santiago de Compostela, Spain
%F reeder-2001-one
%X This paper reports on research that aims to test the efficacy of applying automated evaluation techniques, originally designed for human second language learners, to machine translation (MT) system evaluation. We believe that such evaluation techniques will provide insight into MT evaluation, MT development, the human translation process and the human language learning process. The experiment described here looks only at the intelligibility of MT output. The evaluation technique is derived from a second language acquisition experiment that showed that assessors can differentiate native from non-native language essays in less than 100 words. Particularly illuminating for our purposes is the set of factors on which the assessors made their decisions. We duplicated this experiment to see whether similar criteria could be elicited by repeating the test with both human and machine translation outputs in the decision set. The encouraging results of this experiment, along with an analysis of the language factors contributing to the successful outcomes, are presented here.
%U https://aclanthology.org/2001.mtsummit-eval.7/
Markdown (Informal)
[In one hundred words or less](https://aclanthology.org/2001.mtsummit-eval.7/) (Reeder, MTSummit 2001)
ACL
Florence Reeder. 2001. In one hundred words or less. In Workshop on MT Evaluation, Santiago de Compostela, Spain.