@inproceedings{brasoveanu-dotlacil-2020-production,
    title = "Production-based Cognitive Models as a Test Suite for Reinforcement Learning Algorithms",
    author = "Brasoveanu, Adrian and
      Dotlacil, Jakub",
    editor = "Chersoni, Emmanuele and
      Jacobs, Cassandra and
      Oseki, Yohei and
      Pr{\'e}vot, Laurent and
      Santus, Enrico",
    booktitle = "Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.cmcl-1.3",
    doi = "10.18653/v1/2020.cmcl-1.3",
    pages = "28--37",
    abstract = "We introduce a framework in which production-rule based computational cognitive modeling and Reinforcement Learning can systematically interact and inform each other. We focus on linguistic applications because the sophisticated rule-based cognitive models needed to capture linguistic behavioral data promise to provide a stringent test suite for RL algorithms, connecting RL algorithms to both accuracy and reaction-time experimental data. Thus, we open a path towards assembling an experimentally rigorous and cognitively realistic benchmark for RL algorithms. We extend our previous work on lexical decision tasks and tabular RL algorithms (Brasoveanu and Dotla{\v{c}}il, 2020b) with a discussion of neural-network based approaches, and a discussion of how parsing can be formalized as an RL problem.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="brasoveanu-dotlacil-2020-production">
    <titleInfo>
        <title>Production-based Cognitive Models as a Test Suite for Reinforcement Learning Algorithms</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Adrian</namePart>
        <namePart type="family">Brasoveanu</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Jakub</namePart>
        <namePart type="family">Dotlacil</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Emmanuele</namePart>
            <namePart type="family">Chersoni</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Cassandra</namePart>
            <namePart type="family">Jacobs</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yohei</namePart>
            <namePart type="family">Oseki</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Laurent</namePart>
            <namePart type="family">Prévot</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Enrico</namePart>
            <namePart type="family">Santus</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Online</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We introduce a framework in which production-rule based computational cognitive modeling and Reinforcement Learning can systematically interact and inform each other. We focus on linguistic applications because the sophisticated rule-based cognitive models needed to capture linguistic behavioral data promise to provide a stringent test suite for RL algorithms, connecting RL algorithms to both accuracy and reaction-time experimental data. Thus, we open a path towards assembling an experimentally rigorous and cognitively realistic benchmark for RL algorithms. We extend our previous work on lexical decision tasks and tabular RL algorithms (Brasoveanu and Dotlačil, 2020b) with a discussion of neural-network based approaches, and a discussion of how parsing can be formalized as an RL problem.</abstract>
    <identifier type="citekey">brasoveanu-dotlacil-2020-production</identifier>
    <identifier type="doi">10.18653/v1/2020.cmcl-1.3</identifier>
    <location>
        <url>https://aclanthology.org/2020.cmcl-1.3</url>
    </location>
    <part>
        <date>2020-11</date>
        <extent unit="page">
            <start>28</start>
            <end>37</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Production-based Cognitive Models as a Test Suite for Reinforcement Learning Algorithms
%A Brasoveanu, Adrian
%A Dotlacil, Jakub
%Y Chersoni, Emmanuele
%Y Jacobs, Cassandra
%Y Oseki, Yohei
%Y Prévot, Laurent
%Y Santus, Enrico
%S Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F brasoveanu-dotlacil-2020-production
%X We introduce a framework in which production-rule based computational cognitive modeling and Reinforcement Learning can systematically interact and inform each other. We focus on linguistic applications because the sophisticated rule-based cognitive models needed to capture linguistic behavioral data promise to provide a stringent test suite for RL algorithms, connecting RL algorithms to both accuracy and reaction-time experimental data. Thus, we open a path towards assembling an experimentally rigorous and cognitively realistic benchmark for RL algorithms. We extend our previous work on lexical decision tasks and tabular RL algorithms (Brasoveanu and Dotlačil, 2020b) with a discussion of neural-network based approaches, and a discussion of how parsing can be formalized as an RL problem.
%R 10.18653/v1/2020.cmcl-1.3
%U https://aclanthology.org/2020.cmcl-1.3
%U https://doi.org/10.18653/v1/2020.cmcl-1.3
%P 28-37
Markdown (Informal)
[Production-based Cognitive Models as a Test Suite for Reinforcement Learning Algorithms](https://aclanthology.org/2020.cmcl-1.3) (Brasoveanu & Dotlacil, CMCL 2020)

ACL

Adrian Brasoveanu and Jakub Dotlacil. 2020. Production-based Cognitive Models as a Test Suite for Reinforcement Learning Algorithms. In Proceedings of the Workshop on Cognitive Modeling and Computational Linguistics, pages 28–37, Online. Association for Computational Linguistics.