@inproceedings{mohammadshahi-henderson-2023-syntax,
    title = "Syntax-Aware Graph-to-Graph Transformer for Semantic Role Labelling",
    author = "Mohammadshahi, Alireza  and
      Henderson, James",
    editor = "Can, Burcu  and
      Mozes, Maximilian  and
      Cahyawijaya, Samuel  and
      Saphra, Naomi  and
      Kassner, Nora  and
      Ravfogel, Shauli  and
      Ravichander, Abhilasha  and
      Zhao, Chen  and
      Augenstein, Isabelle  and
      Rogers, Anna  and
      Cho, Kyunghyun  and
      Grefenstette, Edward  and
      Voita, Lena",
    booktitle = "Proceedings of the 8th Workshop on Representation Learning for NLP (RepL4NLP 2023)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.repl4nlp-1.15",
    doi = "10.18653/v1/2023.repl4nlp-1.15",
    pages = "174--186",
    abstract = "Recent models have shown that incorporating syntactic knowledge into the semantic role labelling (SRL) task leads to a significant improvement. In this paper, we propose the Syntax-aware Graph-to-Graph Transformer (SynG2G-Tr) model, which encodes syntactic structure using a novel way of inputting graph relations as embeddings directly into the self-attention mechanism of the Transformer. This approach adds a soft bias towards attention patterns that follow the syntactic structure, but also allows the model to use this information to learn alternative patterns. We evaluate our model on both span-based and dependency-based SRL datasets, and outperform previous alternative methods in both in-domain and out-of-domain settings on the CoNLL 2005 and CoNLL 2009 datasets.",
}
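The abstract describes inputting graph relations as embeddings directly into self-attention, so that syntax acts as a soft bias on the attention pattern rather than a hard mask. Below is a minimal sketch of that general idea in the style of relation-conditioned self-attention; the class, tensor names, and shapes are illustrative assumptions for a single attention head, not the authors' released implementation.

```python
# Hypothetical sketch: pairwise graph-relation embeddings injected into
# self-attention as an additive logit bias. Not the SynG2G-Tr codebase.
import math
import torch
import torch.nn as nn


class RelationAwareSelfAttention(nn.Module):
    def __init__(self, d_model: int, num_relations: int):
        super().__init__()
        self.d_model = d_model
        self.q_proj = nn.Linear(d_model, d_model)
        self.k_proj = nn.Linear(d_model, d_model)
        self.v_proj = nn.Linear(d_model, d_model)
        # One embedding per graph-relation label (e.g. a dependency type,
        # plus a "none" label for unconnected token pairs).
        self.rel_emb = nn.Embedding(num_relations, d_model)

    def forward(self, x: torch.Tensor, rel_ids: torch.Tensor) -> torch.Tensor:
        # x: (batch, seq, d_model)
        # rel_ids: (batch, seq, seq), the relation label between tokens i, j.
        q = self.q_proj(x)            # (B, S, D)
        k = self.k_proj(x)            # (B, S, D)
        v = self.v_proj(x)            # (B, S, D)
        r = self.rel_emb(rel_ids)     # (B, S, S, D)

        # Content-content score plus a content-relation term:
        # score_ij = q_i . k_j + q_i . r_ij. Because the relation term is
        # additive in the logits, it softly biases attention toward the
        # graph while leaving other attention patterns learnable.
        scores = torch.einsum("bid,bjd->bij", q, k)
        scores = scores + torch.einsum("bid,bijd->bij", q, r)
        attn = torch.softmax(scores / math.sqrt(self.d_model), dim=-1)
        return attn @ v               # (B, S, D)
```

In such a setup, `rel_ids[b, i, j]` could carry the dependency label on an arc between tokens i and j and a "none" label elsewhere; since the bias enters the logits additively, the model remains free to attend off the syntactic graph when that is useful.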
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="mohammadshahi-henderson-2023-syntax">
    <titleInfo>
      <title>Syntax-Aware Graph-to-Graph Transformer for Semantic Role Labelling</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Alireza</namePart>
      <namePart type="family">Mohammadshahi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">James</namePart>
      <namePart type="family">Henderson</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 8th Workshop on Representation Learning for NLP (RepL4NLP 2023)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Burcu</namePart>
        <namePart type="family">Can</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Maximilian</namePart>
        <namePart type="family">Mozes</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Samuel</namePart>
        <namePart type="family">Cahyawijaya</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Naomi</namePart>
        <namePart type="family">Saphra</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nora</namePart>
        <namePart type="family">Kassner</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Shauli</namePart>
        <namePart type="family">Ravfogel</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Abhilasha</namePart>
        <namePart type="family">Ravichander</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Chen</namePart>
        <namePart type="family">Zhao</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Isabelle</namePart>
        <namePart type="family">Augenstein</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Rogers</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kyunghyun</namePart>
        <namePart type="family">Cho</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Edward</namePart>
        <namePart type="family">Grefenstette</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lena</namePart>
        <namePart type="family">Voita</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Toronto, Canada</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Recent models have shown that incorporating syntactic knowledge into the semantic role labelling (SRL) task leads to a significant improvement. In this paper, we propose the Syntax-aware Graph-to-Graph Transformer (SynG2G-Tr) model, which encodes syntactic structure using a novel way of inputting graph relations as embeddings directly into the self-attention mechanism of the Transformer. This approach adds a soft bias towards attention patterns that follow the syntactic structure, but also allows the model to use this information to learn alternative patterns. We evaluate our model on both span-based and dependency-based SRL datasets, and outperform previous alternative methods in both in-domain and out-of-domain settings on the CoNLL 2005 and CoNLL 2009 datasets.</abstract>
    <identifier type="citekey">mohammadshahi-henderson-2023-syntax</identifier>
    <identifier type="doi">10.18653/v1/2023.repl4nlp-1.15</identifier>
    <location>
      <url>https://aclanthology.org/2023.repl4nlp-1.15</url>
    </location>
    <part>
      <date>2023-07</date>
      <extent unit="page">
        <start>174</start>
        <end>186</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Syntax-Aware Graph-to-Graph Transformer for Semantic Role Labelling
%A Mohammadshahi, Alireza
%A Henderson, James
%Y Can, Burcu
%Y Mozes, Maximilian
%Y Cahyawijaya, Samuel
%Y Saphra, Naomi
%Y Kassner, Nora
%Y Ravfogel, Shauli
%Y Ravichander, Abhilasha
%Y Zhao, Chen
%Y Augenstein, Isabelle
%Y Rogers, Anna
%Y Cho, Kyunghyun
%Y Grefenstette, Edward
%Y Voita, Lena
%S Proceedings of the 8th Workshop on Representation Learning for NLP (RepL4NLP 2023)
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F mohammadshahi-henderson-2023-syntax
%X Recent models have shown that incorporating syntactic knowledge into the semantic role labelling (SRL) task leads to a significant improvement. In this paper, we propose the Syntax-aware Graph-to-Graph Transformer (SynG2G-Tr) model, which encodes syntactic structure using a novel way of inputting graph relations as embeddings directly into the self-attention mechanism of the Transformer. This approach adds a soft bias towards attention patterns that follow the syntactic structure, but also allows the model to use this information to learn alternative patterns. We evaluate our model on both span-based and dependency-based SRL datasets, and outperform previous alternative methods in both in-domain and out-of-domain settings on the CoNLL 2005 and CoNLL 2009 datasets.
%R 10.18653/v1/2023.repl4nlp-1.15
%U https://aclanthology.org/2023.repl4nlp-1.15
%U https://doi.org/10.18653/v1/2023.repl4nlp-1.15
%P 174-186
Markdown (Informal)
[Syntax-Aware Graph-to-Graph Transformer for Semantic Role Labelling](https://aclanthology.org/2023.repl4nlp-1.15) (Mohammadshahi & Henderson, RepL4NLP 2023)
ACL
Alireza Mohammadshahi and James Henderson. 2023. [Syntax-Aware Graph-to-Graph Transformer for Semantic Role Labelling](https://aclanthology.org/2023.repl4nlp-1.15). In Proceedings of the 8th Workshop on Representation Learning for NLP (RepL4NLP 2023), pages 174–186, Toronto, Canada. Association for Computational Linguistics.