@inproceedings{zhang-etal-2022-efficient-robust,
    title = "Efficient and Robust Knowledge Graph Construction",
    author = "Zhang, Ningyu and
      Gui, Tao and
      Nan, Guoshun",
    editor = "Alonso, Miguel A. and
      Wei, Zhongyu",
    booktitle = "Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing: Tutorial Abstracts",
    month = nov,
    year = "2022",
    address = "Taipei",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.aacl-tutorials.1/",
    doi = "10.18653/v1/2022.aacl-tutorials.1",
    pages = "1--7",
    abstract = "Knowledge graph construction, which aims to extract knowledge from text corpora, has attracted researchers in the NLP community. The past decades have witnessed remarkable progress in knowledge graph construction based on neural models; however, these models often require massive computation or labeled data resources and suffer from unstable inference when faced with biased or adversarial samples. Recently, numerous approaches have been explored to mitigate the efficiency and robustness issues of knowledge graph construction, such as prompt learning and adversarial training. In this tutorial, we aim to bring interested NLP researchers up to speed on recent and ongoing techniques for efficient and robust knowledge graph construction. Additionally, our goal is to provide a systematic and up-to-date overview of these methods and to reveal new research opportunities to the audience."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhang-etal-2022-efficient-robust">
    <titleInfo>
        <title>Efficient and Robust Knowledge Graph Construction</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Ningyu</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Tao</namePart>
        <namePart type="family">Gui</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Guoshun</namePart>
        <namePart type="family">Nan</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2022-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing: Tutorial Abstracts</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Miguel</namePart>
            <namePart type="given">A</namePart>
            <namePart type="family">Alonso</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Zhongyu</namePart>
            <namePart type="family">Wei</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Taipei</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Knowledge graph construction, which aims to extract knowledge from text corpora, has attracted researchers in the NLP community. The past decades have witnessed remarkable progress in knowledge graph construction based on neural models; however, these models often require massive computation or labeled data resources and suffer from unstable inference when faced with biased or adversarial samples. Recently, numerous approaches have been explored to mitigate the efficiency and robustness issues of knowledge graph construction, such as prompt learning and adversarial training. In this tutorial, we aim to bring interested NLP researchers up to speed on recent and ongoing techniques for efficient and robust knowledge graph construction. Additionally, our goal is to provide a systematic and up-to-date overview of these methods and to reveal new research opportunities to the audience.</abstract>
    <identifier type="citekey">zhang-etal-2022-efficient-robust</identifier>
    <identifier type="doi">10.18653/v1/2022.aacl-tutorials.1</identifier>
    <location>
        <url>https://aclanthology.org/2022.aacl-tutorials.1/</url>
    </location>
    <part>
        <date>2022-11</date>
        <extent unit="page">
            <start>1</start>
            <end>7</end>
        </extent>
    </part>
</mods>
</modsCollection>

%0 Conference Proceedings
%T Efficient and Robust Knowledge Graph Construction
%A Zhang, Ningyu
%A Gui, Tao
%A Nan, Guoshun
%Y Alonso, Miguel A.
%Y Wei, Zhongyu
%S Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing: Tutorial Abstracts
%D 2022
%8 November
%I Association for Computational Linguistics
%C Taipei
%F zhang-etal-2022-efficient-robust
%X Knowledge graph construction, which aims to extract knowledge from text corpora, has attracted researchers in the NLP community. The past decades have witnessed remarkable progress in knowledge graph construction based on neural models; however, these models often require massive computation or labeled data resources and suffer from unstable inference when faced with biased or adversarial samples. Recently, numerous approaches have been explored to mitigate the efficiency and robustness issues of knowledge graph construction, such as prompt learning and adversarial training. In this tutorial, we aim to bring interested NLP researchers up to speed on recent and ongoing techniques for efficient and robust knowledge graph construction. Additionally, our goal is to provide a systematic and up-to-date overview of these methods and to reveal new research opportunities to the audience.
%R 10.18653/v1/2022.aacl-tutorials.1
%U https://aclanthology.org/2022.aacl-tutorials.1/
%U https://doi.org/10.18653/v1/2022.aacl-tutorials.1
%P 1-7
Markdown (Informal)
[Efficient and Robust Knowledge Graph Construction](https://aclanthology.org/2022.aacl-tutorials.1/) (Zhang et al., AACL-IJCNLP 2022)
ACL
Ningyu Zhang, Tao Gui, and Guoshun Nan. 2022. Efficient and Robust Knowledge Graph Construction. In Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing: Tutorial Abstracts, pages 1–7, Taipei. Association for Computational Linguistics.