@inproceedings{li-etal-2025-knowledge,
title = "Knowledge Graph Pooling and Unpooling for Concept Abstraction",
author = "Li, Juan and
Zhang, Wen and
Liu, Zhiqiang and
Tu, Mingchen and
Chen, Mingyang and
Zhang, Ningyu and
Li, Shijian",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Eugenio, Barbara Di and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.coling-main.359/",
pages = "5364--5374",
abstract = "Knowledge graph embedding (KGE) aims to embed entities and relations as vectors in a continuous space and has proven to be effective for KG tasks. Recently, graph neural networks (GNN) based KGEs gain much attention due to their strong capability of encoding complex graph structures. However, most GNN-based KGEs are directly optimized based on the instance triples in KGs, ignoring the latent concepts and hierarchies of the entities. Though some works explicitly inject concepts and hierarchies into models, they are limited to predefined concepts and hierarchies, which are missing in a lot of KGs. Thus in this paper, we propose a novel framework with KG Pooling and unpooling and Contrastive Learning (KGPCL) to abstract and encode the latent concepts for better KG prediction. Specifically, with an input KG, we first construct a U-KG through KG pooling and unpooling. KG pooling abstracts the input graph to a smaller graph as a pooled graph, and KG unpooling recovers the input graph from the pooled graph. Then we model the U-KG with relational KGEs to get the representations of entities and relations for prediction. Finally, we propose the local and global contrastive loss to jointly enhance the representation of entities. Experimental results show that our models outperform the KGE baselines on link prediction task."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="li-etal-2025-knowledge">
    <titleInfo>
        <title>Knowledge Graph Pooling and Unpooling for Concept Abstraction</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Juan</namePart>
        <namePart type="family">Li</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Wen</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Zhiqiang</namePart>
        <namePart type="family">Liu</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Mingchen</namePart>
        <namePart type="family">Tu</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Mingyang</namePart>
        <namePart type="family">Chen</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Ningyu</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Shijian</namePart>
        <namePart type="family">Li</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2025-01</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 31st International Conference on Computational Linguistics</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Owen</namePart>
            <namePart type="family">Rambow</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Leo</namePart>
            <namePart type="family">Wanner</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Marianna</namePart>
            <namePart type="family">Apidianaki</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Hend</namePart>
            <namePart type="family">Al-Khalifa</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Barbara</namePart>
            <namePart type="given">Di</namePart>
            <namePart type="family">Eugenio</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Steven</namePart>
            <namePart type="family">Schockaert</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Abu Dhabi, UAE</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Knowledge graph embedding (KGE) aims to embed entities and relations as vectors in a continuous space and has proven effective for KG tasks. Recently, graph neural network (GNN)-based KGEs have gained much attention due to their strong capability for encoding complex graph structures. However, most GNN-based KGEs are optimized directly on the instance triples in KGs, ignoring the latent concepts and hierarchies of the entities. Though some works explicitly inject concepts and hierarchies into models, they are limited to predefined concepts and hierarchies, which are missing in many KGs. Thus, in this paper, we propose a novel framework with KG Pooling and unpooling and Contrastive Learning (KGPCL) to abstract and encode the latent concepts for better KG prediction. Specifically, given an input KG, we first construct a U-KG through KG pooling and unpooling: KG pooling abstracts the input graph into a smaller, pooled graph, and KG unpooling recovers the input graph from the pooled graph. We then model the U-KG with relational KGEs to obtain the representations of entities and relations for prediction. Finally, we propose local and global contrastive losses to jointly enhance the representations of entities. Experimental results show that our models outperform the KGE baselines on the link prediction task.</abstract>
    <identifier type="citekey">li-etal-2025-knowledge</identifier>
    <location>
        <url>https://aclanthology.org/2025.coling-main.359/</url>
    </location>
    <part>
        <date>2025-01</date>
        <extent unit="page">
            <start>5364</start>
            <end>5374</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Knowledge Graph Pooling and Unpooling for Concept Abstraction
%A Li, Juan
%A Zhang, Wen
%A Liu, Zhiqiang
%A Tu, Mingchen
%A Chen, Mingyang
%A Zhang, Ningyu
%A Li, Shijian
%Y Rambow, Owen
%Y Wanner, Leo
%Y Apidianaki, Marianna
%Y Al-Khalifa, Hend
%Y Eugenio, Barbara Di
%Y Schockaert, Steven
%S Proceedings of the 31st International Conference on Computational Linguistics
%D 2025
%8 January
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F li-etal-2025-knowledge
%X Knowledge graph embedding (KGE) aims to embed entities and relations as vectors in a continuous space and has proven effective for KG tasks. Recently, graph neural network (GNN)-based KGEs have gained much attention due to their strong capability for encoding complex graph structures. However, most GNN-based KGEs are optimized directly on the instance triples in KGs, ignoring the latent concepts and hierarchies of the entities. Though some works explicitly inject concepts and hierarchies into models, they are limited to predefined concepts and hierarchies, which are missing in many KGs. Thus, in this paper, we propose a novel framework with KG Pooling and unpooling and Contrastive Learning (KGPCL) to abstract and encode the latent concepts for better KG prediction. Specifically, given an input KG, we first construct a U-KG through KG pooling and unpooling: KG pooling abstracts the input graph into a smaller, pooled graph, and KG unpooling recovers the input graph from the pooled graph. We then model the U-KG with relational KGEs to obtain the representations of entities and relations for prediction. Finally, we propose local and global contrastive losses to jointly enhance the representations of entities. Experimental results show that our models outperform the KGE baselines on the link prediction task.
%U https://aclanthology.org/2025.coling-main.359/
%P 5364-5374
Markdown (Informal)

[Knowledge Graph Pooling and Unpooling for Concept Abstraction](https://aclanthology.org/2025.coling-main.359/) (Li et al., COLING 2025)

ACL

Juan Li, Wen Zhang, Zhiqiang Liu, Mingchen Tu, Mingyang Chen, Ningyu Zhang, and Shijian Li. 2025. Knowledge Graph Pooling and Unpooling for Concept Abstraction. In Proceedings of the 31st International Conference on Computational Linguistics, pages 5364–5374, Abu Dhabi, UAE. Association for Computational Linguistics.
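
For readers skimming the abstract, a minimal Python sketch of the pooling/unpooling idea it describes may help. This is an illustration under strong assumptions, not the authors' implementation: the concept assignment is hand-fixed rather than learned, all entity, relation, and concept names are hypothetical, and the relational KGE and the local/global contrastive losses are omitted entirely.

```python
from collections import defaultdict

# Toy instance-level KG as (head, relation, tail) triples. All names are
# hypothetical illustrations, not data from the paper.
triples = [
    ("paris", "capital_of", "france"),
    ("berlin", "capital_of", "germany"),
    ("france", "member_of", "eu"),
    ("germany", "member_of", "eu"),
]

# Hand-fixed hard clustering of entities into concept nodes. In KGPCL this
# abstraction is learned; a fixed dict keeps the sketch self-contained.
assign = {
    "paris": "city", "berlin": "city",
    "france": "country", "germany": "country",
    "eu": "organization",
}

def pool_kg(triples, assign):
    """KG pooling: abstract the input graph into a smaller pooled graph by
    replacing each entity with its concept node and merging duplicate edges."""
    pooled = set()
    for h, r, t in triples:
        pooled.add((assign[h], r, assign[t]))
    return sorted(pooled)

def unpool_kg(pooled, assign):
    """KG unpooling: expand each concept-level edge back to the entity pairs
    it covers, recovering (a superset of) the input graph's edges."""
    members = defaultdict(list)
    for entity, concept in assign.items():
        members[concept].append(entity)
    recovered = set()
    for ch, r, ct in pooled:
        for h in members[ch]:
            for t in members[ct]:
                recovered.add((h, r, t))
    return sorted(recovered)

pooled = pool_kg(triples, assign)
print(pooled)
# [('city', 'capital_of', 'country'), ('country', 'member_of', 'organization')]
recovered = unpool_kg(pooled, assign)
print(all(t in recovered for t in triples))  # True: input edges are recovered
```

Note that this naive unpooling over-generates (it also yields, e.g., ('paris', 'capital_of', 'germany')); how the input and pooled graphs are combined into the U-KG, and how the learned abstraction avoids such artifacts, follows the paper rather than this sketch.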