@inproceedings{coman-etal-2024-gadepo,
    title = "{GAD}e{P}o: Graph-Assisted Declarative Pooling Transformers for Document-Level Relation Extraction",
    author = "Coman, Andrei  and
      Theodoropoulos, Christos  and
      Moens, Marie-Francine  and
      Henderson, James",
    editor = "Yu, Wenhao  and
      Shi, Weijia  and
      Yasunaga, Michihiro  and
      Jiang, Meng  and
      Zhu, Chenguang  and
      Hajishirzi, Hannaneh  and
      Zettlemoyer, Luke  and
      Zhang, Zhihan",
    booktitle = "Proceedings of the 3rd Workshop on Knowledge Augmented Methods for NLP",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.knowledgenlp-1.1/",
    doi = "10.18653/v1/2024.knowledgenlp-1.1",
    pages = "1--14",
    abstract = "Document-level relation extraction typically relies on text-based encoders and hand-coded pooling heuristics to aggregate information learned by the encoder. In this paper, we leverage the intrinsic graph processing capabilities of the Transformer model and propose replacing hand-coded pooling methods with new tokens in the input, which are designed to aggregate information via explicit graph relations in the computation of attention weights. We introduce a joint text-graph Transformer model and a graph-assisted declarative pooling (GADePo) specification of the input, which provides explicit and high-level instructions for information aggregation. GADePo allows the pooling process to be guided by domain-specific knowledge or desired outcomes but still learned by the Transformer, leading to more flexible and customisable pooling strategies. We evaluate our method across diverse datasets and models and show that our approach yields promising results that are consistently better than those achieved by the hand-coded pooling functions."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="coman-etal-2024-gadepo">
    <titleInfo>
      <title>GADePo: Graph-Assisted Declarative Pooling Transformers for Document-Level Relation Extraction</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Andrei</namePart>
      <namePart type="family">Coman</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Christos</namePart>
      <namePart type="family">Theodoropoulos</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marie-Francine</namePart>
      <namePart type="family">Moens</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">James</namePart>
      <namePart type="family">Henderson</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 3rd Workshop on Knowledge Augmented Methods for NLP</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Wenhao</namePart>
        <namePart type="family">Yu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Weijia</namePart>
        <namePart type="family">Shi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Michihiro</namePart>
        <namePart type="family">Yasunaga</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Meng</namePart>
        <namePart type="family">Jiang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Chenguang</namePart>
        <namePart type="family">Zhu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Hannaneh</namePart>
        <namePart type="family">Hajishirzi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Luke</namePart>
        <namePart type="family">Zettlemoyer</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Zhihan</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Bangkok, Thailand</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Document-level relation extraction typically relies on text-based encoders and hand-coded pooling heuristics to aggregate information learned by the encoder. In this paper, we leverage the intrinsic graph processing capabilities of the Transformer model and propose replacing hand-coded pooling methods with new tokens in the input, which are designed to aggregate information via explicit graph relations in the computation of attention weights. We introduce a joint text-graph Transformer model and a graph-assisted declarative pooling (GADePo) specification of the input, which provides explicit and high-level instructions for information aggregation. GADePo allows the pooling process to be guided by domain-specific knowledge or desired outcomes but still learned by the Transformer, leading to more flexible and customisable pooling strategies. We evaluate our method across diverse datasets and models and show that our approach yields promising results that are consistently better than those achieved by the hand-coded pooling functions.</abstract>
    <identifier type="citekey">coman-etal-2024-gadepo</identifier>
    <identifier type="doi">10.18653/v1/2024.knowledgenlp-1.1</identifier>
    <location>
      <url>https://aclanthology.org/2024.knowledgenlp-1.1/</url>
    </location>
    <part>
      <date>2024-08</date>
      <extent unit="page">
        <start>1</start>
        <end>14</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T GADePo: Graph-Assisted Declarative Pooling Transformers for Document-Level Relation Extraction
%A Coman, Andrei
%A Theodoropoulos, Christos
%A Moens, Marie-Francine
%A Henderson, James
%Y Yu, Wenhao
%Y Shi, Weijia
%Y Yasunaga, Michihiro
%Y Jiang, Meng
%Y Zhu, Chenguang
%Y Hajishirzi, Hannaneh
%Y Zettlemoyer, Luke
%Y Zhang, Zhihan
%S Proceedings of the 3rd Workshop on Knowledge Augmented Methods for NLP
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F coman-etal-2024-gadepo
%X Document-level relation extraction typically relies on text-based encoders and hand-coded pooling heuristics to aggregate information learned by the encoder. In this paper, we leverage the intrinsic graph processing capabilities of the Transformer model and propose replacing hand-coded pooling methods with new tokens in the input, which are designed to aggregate information via explicit graph relations in the computation of attention weights. We introduce a joint text-graph Transformer model and a graph-assisted declarative pooling (GADePo) specification of the input, which provides explicit and high-level instructions for information aggregation. GADePo allows the pooling process to be guided by domain-specific knowledge or desired outcomes but still learned by the Transformer, leading to more flexible and customisable pooling strategies. We evaluate our method across diverse datasets and models and show that our approach yields promising results that are consistently better than those achieved by the hand-coded pooling functions.
%R 10.18653/v1/2024.knowledgenlp-1.1
%U https://aclanthology.org/2024.knowledgenlp-1.1/
%U https://doi.org/10.18653/v1/2024.knowledgenlp-1.1
%P 1-14
Markdown (Informal)
[GADePo: Graph-Assisted Declarative Pooling Transformers for Document-Level Relation Extraction](https://aclanthology.org/2024.knowledgenlp-1.1/) (Coman et al., KnowledgeNLP 2024)
ACL
Andrei Coman, Christos Theodoropoulos, Marie-Francine Moens, and James Henderson. 2024. [GADePo: Graph-Assisted Declarative Pooling Transformers for Document-Level Relation Extraction](https://aclanthology.org/2024.knowledgenlp-1.1/). In *Proceedings of the 3rd Workshop on Knowledge Augmented Methods for NLP*, pages 1–14, Bangkok, Thailand. Association for Computational Linguistics.
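
The abstract describes the core mechanism only at a high level: extra pooling tokens are appended to the input, and explicit graph relations determine which text positions each of them may attend to, so aggregation is learned by the Transformer rather than hand-coded (e.g. logsumexp over mention embeddings). Below is a minimal, hypothetical sketch of that idea, not the authors' implementation; the class name, tensor shapes, and masking convention are all illustrative assumptions.

```python
import torch
import torch.nn as nn

class DeclarativePoolingLayer(nn.Module):
    """Attention over text tokens plus appended pooling tokens (sketch)."""

    def __init__(self, d_model: int, n_heads: int):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)

    def forward(self, hidden: torch.Tensor, edge_mask: torch.Tensor) -> torch.Tensor:
        # hidden:    (B, L, d_model) -- text tokens followed by pooling tokens.
        # edge_mask: (B, L, L) bool -- True means "attention disallowed". Rows
        # belonging to pooling tokens allow only the mention positions they
        # aggregate; this is where the declared graph relations enter the
        # computation of the attention weights.
        mask = edge_mask.repeat_interleave(self.attn.num_heads, dim=0)
        out, _ = self.attn(hidden, hidden, hidden, attn_mask=mask)
        return out

# Toy usage: 6 text tokens plus 2 pooling tokens. Pooling token 0 aggregates
# the mentions at positions 1 and 4; pooling token 1 aggregates position 3.
B, T, P, D, H = 1, 6, 2, 16, 4
layer = DeclarativePoolingLayer(D, H)
hidden = torch.randn(B, T + P, D)
mask = torch.zeros(B, T + P, T + P, dtype=torch.bool)  # False = allowed
mask[:, T:, :] = True            # pooling-token rows: block everything...
mask[:, T + 0, [1, 4]] = False   # ...except the declared mention positions
mask[:, T + 1, [3]] = False
pooled = layer(hidden, mask)[:, T:]  # (B, P, D) learned entity embeddings
```

In this toy setup the pooled rows play the role that hand-coded pooling outputs (mean, max, logsumexp over mention embeddings) play in conventional document-level relation extraction pipelines; how such masking interacts with a pretrained encoder's layers and how the pooling tokens are initialised are design choices the sketch deliberately leaves out.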