@inproceedings{zouhar-etal-2022-knowledge,
title = "Knowledge Base Index Compression via Dimensionality and Precision Reduction",
author = "Zouhar, Vil{\'e}m and
Mosbach, Marius and
Zhang, Miaoran and
Klakow, Dietrich",
editor = "Das, Rajarshi and
Lewis, Patrick and
Min, Sewon and
Thai, June and
Zaheer, Manzil",
booktitle = "Proceedings of the 1st Workshop on Semiparametric Methods in NLP: Decoupling Logic from Knowledge",
month = may,
year = "2022",
address = "Dublin, Ireland and Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.spanlp-1.5",
doi = "10.18653/v1/2022.spanlp-1.5",
pages = "41--53",
    abstract = "Recently, neural network-based approaches to knowledge-intensive NLP tasks, such as question answering, have started to rely heavily on the combination of neural retrievers and readers. Retrieval is typically performed over a large textual knowledge base (KB), which requires significant memory and compute resources, especially when scaled up. On HotpotQA, we systematically investigate reducing the size of the KB index by means of dimensionality reduction (sparse random projections, PCA, autoencoders) and numerical precision reduction. Our results show that PCA is an easy solution that requires very little data and is only slightly worse than autoencoders, which are less stable. All methods are sensitive to pre- and post-processing, and data should always be centered and normalized both before and after dimension reduction. Finally, we show that it is possible to combine PCA with using 1 bit per dimension. Overall we achieve (1) 100$\times$ compression with 75{\%} and (2) 24$\times$ compression with 92{\%} of the original retrieval performance.",
}
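The pipeline the abstract describes (center and normalize, reduce dimensionality with PCA, center and normalize again, then quantize to 1 bit per dimension) can be sketched as follows. This is a minimal illustration on synthetic data, not the authors' released code; the sizes (10,000 documents, 768 source dimensions, 128 target dimensions, 1,000 PCA fitting samples) are illustrative assumptions.

```python
# Minimal sketch of the compression pipeline from the abstract, assuming
# scikit-learn PCA and synthetic embeddings; sizes are illustrative only.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
docs = rng.normal(size=(10_000, 768)).astype(np.float32)    # KB index embeddings
queries = rng.normal(size=(100, 768)).astype(np.float32)    # query embeddings

def center_normalize(x: np.ndarray, mean: np.ndarray) -> np.ndarray:
    """Center on a shared mean, then scale each vector to unit length."""
    x = x - mean
    return x / np.linalg.norm(x, axis=1, keepdims=True)

# Pre-processing: center and normalize *before* dimension reduction.
mean_pre = docs.mean(axis=0)
docs_p = center_normalize(docs, mean_pre)
queries_p = center_normalize(queries, mean_pre)

# PCA needs very little data: fit it on a small sample of the index,
# reducing 768 -> 128 dimensions.
pca = PCA(n_components=128).fit(docs_p[:1_000])
docs_r = pca.transform(docs_p)
queries_r = pca.transform(queries_p)

# Post-processing: center and normalize again *after* reduction.
mean_post = docs_r.mean(axis=0)
docs_r = center_normalize(docs_r, mean_post)
queries_r = center_normalize(queries_r, mean_post)

# Precision reduction: keep only the sign of each dimension, i.e. 1 bit per
# dimension (a real index would store the bits packed, e.g. via np.packbits).
docs_b = np.where(docs_r >= 0, 1.0, -1.0).astype(np.float32)
queries_b = np.where(queries_r >= 0, 1.0, -1.0).astype(np.float32)

# Retrieval: dot products of sign vectors rank documents by Hamming similarity.
scores = queries_b @ docs_b.T
top1 = scores.argmax(axis=1)
print(top1[:5])
```

Under these illustrative sizes, each vector shrinks from 768 × 32 bits to 128 bits, i.e. 192× smaller when the signs are stored packed; the paper's reported 100× and 24× operating points correspond to different dimension/precision trade-offs.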
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="zouhar-etal-2022-knowledge">
    <titleInfo>
      <title>Knowledge Base Index Compression via Dimensionality and Precision Reduction</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Vilém</namePart>
      <namePart type="family">Zouhar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marius</namePart>
      <namePart type="family">Mosbach</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Miaoran</namePart>
      <namePart type="family">Zhang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dietrich</namePart>
      <namePart type="family">Klakow</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 1st Workshop on Semiparametric Methods in NLP: Decoupling Logic from Knowledge</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Rajarshi</namePart>
        <namePart type="family">Das</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Patrick</namePart>
        <namePart type="family">Lewis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sewon</namePart>
        <namePart type="family">Min</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">June</namePart>
        <namePart type="family">Thai</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Manzil</namePart>
        <namePart type="family">Zaheer</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Dublin, Ireland and Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Recently, neural network-based approaches to knowledge-intensive NLP tasks, such as question answering, have started to rely heavily on the combination of neural retrievers and readers. Retrieval is typically performed over a large textual knowledge base (KB), which requires significant memory and compute resources, especially when scaled up. On HotpotQA, we systematically investigate reducing the size of the KB index by means of dimensionality reduction (sparse random projections, PCA, autoencoders) and numerical precision reduction. Our results show that PCA is an easy solution that requires very little data and is only slightly worse than autoencoders, which are less stable. All methods are sensitive to pre- and post-processing, and data should always be centered and normalized both before and after dimension reduction. Finally, we show that it is possible to combine PCA with using 1 bit per dimension. Overall we achieve (1) 100× compression with 75% and (2) 24× compression with 92% of the original retrieval performance.</abstract>
    <identifier type="citekey">zouhar-etal-2022-knowledge</identifier>
    <identifier type="doi">10.18653/v1/2022.spanlp-1.5</identifier>
    <location>
      <url>https://aclanthology.org/2022.spanlp-1.5</url>
    </location>
    <part>
      <date>2022-05</date>
      <extent unit="page">
        <start>41</start>
        <end>53</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Knowledge Base Index Compression via Dimensionality and Precision Reduction
%A Zouhar, Vilém
%A Mosbach, Marius
%A Zhang, Miaoran
%A Klakow, Dietrich
%Y Das, Rajarshi
%Y Lewis, Patrick
%Y Min, Sewon
%Y Thai, June
%Y Zaheer, Manzil
%S Proceedings of the 1st Workshop on Semiparametric Methods in NLP: Decoupling Logic from Knowledge
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland and Online
%F zouhar-etal-2022-knowledge
%X Recently, neural network-based approaches to knowledge-intensive NLP tasks, such as question answering, have started to rely heavily on the combination of neural retrievers and readers. Retrieval is typically performed over a large textual knowledge base (KB), which requires significant memory and compute resources, especially when scaled up. On HotpotQA, we systematically investigate reducing the size of the KB index by means of dimensionality reduction (sparse random projections, PCA, autoencoders) and numerical precision reduction. Our results show that PCA is an easy solution that requires very little data and is only slightly worse than autoencoders, which are less stable. All methods are sensitive to pre- and post-processing, and data should always be centered and normalized both before and after dimension reduction. Finally, we show that it is possible to combine PCA with using 1 bit per dimension. Overall we achieve (1) 100× compression with 75% and (2) 24× compression with 92% of the original retrieval performance.
%R 10.18653/v1/2022.spanlp-1.5
%U https://aclanthology.org/2022.spanlp-1.5
%U https://doi.org/10.18653/v1/2022.spanlp-1.5
%P 41-53
Markdown (Informal)
[Knowledge Base Index Compression via Dimensionality and Precision Reduction](https://aclanthology.org/2022.spanlp-1.5) (Zouhar et al., SpaNLP 2022)
ACL
Vilém Zouhar, Marius Mosbach, Miaoran Zhang, and Dietrich Klakow. 2022. Knowledge Base Index Compression via Dimensionality and Precision Reduction. In Proceedings of the 1st Workshop on Semiparametric Methods in NLP: Decoupling Logic from Knowledge, pages 41–53, Dublin, Ireland and Online. Association for Computational Linguistics.