@inproceedings{sahu-etal-2024-pelican,
    title = "Pelican: Correcting Hallucination in Vision-{LLM}s via Claim Decomposition and Program of Thought Verification",
    author = "Sahu, Pritish and
      Sikka, Karan and
      Divakaran, Ajay",
    editor = "Al-Onaizan, Yaser and
      Bansal, Mohit and
      Chen, Yun-Nung",
    booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2024",
    address = "Miami, Florida, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.emnlp-main.470",
    doi = "10.18653/v1/2024.emnlp-main.470",
    pages = "8228--8248",
    abstract = "Large Visual Language Models (LVLMs) struggle with hallucinations in visual instruction-following tasks. These issues hinder their trustworthiness and real-world applicability. We propose Pelican {--} a novel framework designed to detect and mitigate hallucinations through claim verification. Pelican first decomposes the visual claim into a chain of sub-claims based on first-order predicates. These sub-claims consist of (predicate, question) pairs and can be conceptualized as nodes of a computational graph. We then use Program-of-Thought prompting to generate Python code for answering these questions through flexible composition of external tools. Pelican improves over prior work by introducing (1) intermediate variables for precise grounding of object instances, and (2) shared computation for answering the sub-questions to enable adaptive corrections and inconsistency identification. We finally use the reasoning abilities of an LLM to verify the correctness of the claim by considering the consistency and confidence of the (question, answer) pairs from each sub-claim. Our experiments demonstrate consistent performance improvements over various baseline LVLMs and existing hallucination mitigation approaches across several benchmarks.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="sahu-etal-2024-pelican">
    <titleInfo>
      <title>Pelican: Correcting Hallucination in Vision-LLMs via Claim Decomposition and Program of Thought Verification</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Pritish</namePart>
      <namePart type="family">Sahu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Karan</namePart>
      <namePart type="family">Sikka</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ajay</namePart>
      <namePart type="family">Divakaran</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Yaser</namePart>
        <namePart type="family">Al-Onaizan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohit</namePart>
        <namePart type="family">Bansal</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yun-Nung</namePart>
        <namePart type="family">Chen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Miami, Florida, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Large Visual Language Models (LVLMs) struggle with hallucinations in visual instruction-following tasks. These issues hinder their trustworthiness and real-world applicability. We propose Pelican – a novel framework designed to detect and mitigate hallucinations through claim verification. Pelican first decomposes the visual claim into a chain of sub-claims based on first-order predicates. These sub-claims consist of (predicate, question) pairs and can be conceptualized as nodes of a computational graph. We then use Program-of-Thought prompting to generate Python code for answering these questions through flexible composition of external tools. Pelican improves over prior work by introducing (1) intermediate variables for precise grounding of object instances, and (2) shared computation for answering the sub-questions to enable adaptive corrections and inconsistency identification. We finally use the reasoning abilities of an LLM to verify the correctness of the claim by considering the consistency and confidence of the (question, answer) pairs from each sub-claim. Our experiments demonstrate consistent performance improvements over various baseline LVLMs and existing hallucination mitigation approaches across several benchmarks.</abstract>
    <identifier type="citekey">sahu-etal-2024-pelican</identifier>
    <identifier type="doi">10.18653/v1/2024.emnlp-main.470</identifier>
    <location>
      <url>https://aclanthology.org/2024.emnlp-main.470</url>
    </location>
    <part>
      <date>2024-11</date>
      <extent unit="page">
        <start>8228</start>
        <end>8248</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Pelican: Correcting Hallucination in Vision-LLMs via Claim Decomposition and Program of Thought Verification
%A Sahu, Pritish
%A Sikka, Karan
%A Divakaran, Ajay
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F sahu-etal-2024-pelican
%X Large Visual Language Models (LVLMs) struggle with hallucinations in visual instruction-following tasks. These issues hinder their trustworthiness and real-world applicability. We propose Pelican – a novel framework designed to detect and mitigate hallucinations through claim verification. Pelican first decomposes the visual claim into a chain of sub-claims based on first-order predicates. These sub-claims consist of (predicate, question) pairs and can be conceptualized as nodes of a computational graph. We then use Program-of-Thought prompting to generate Python code for answering these questions through flexible composition of external tools. Pelican improves over prior work by introducing (1) intermediate variables for precise grounding of object instances, and (2) shared computation for answering the sub-questions to enable adaptive corrections and inconsistency identification. We finally use the reasoning abilities of an LLM to verify the correctness of the claim by considering the consistency and confidence of the (question, answer) pairs from each sub-claim. Our experiments demonstrate consistent performance improvements over various baseline LVLMs and existing hallucination mitigation approaches across several benchmarks.
%R 10.18653/v1/2024.emnlp-main.470
%U https://aclanthology.org/2024.emnlp-main.470
%U https://doi.org/10.18653/v1/2024.emnlp-main.470
%P 8228-8248
Markdown (Informal)
[Pelican: Correcting Hallucination in Vision-LLMs via Claim Decomposition and Program of Thought Verification](https://aclanthology.org/2024.emnlp-main.470) (Sahu et al., EMNLP 2024)
ACL
Pritish Sahu, Karan Sikka, and Ajay Divakaran. 2024. Pelican: Correcting Hallucination in Vision-LLMs via Claim Decomposition and Program of Thought Verification. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 8228–8248, Miami, Florida, USA. Association for Computational Linguistics.
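
The abstract above describes a decompose-then-verify pipeline: split a visual claim into (predicate, question) sub-claims, answer each sub-question with generated tool-using Python code (Program-of-Thought), then let an LLM judge the claim from the consistency and confidence of the collected answers. The sketch below illustrates only that control flow; all helper names (decompose_claim, generate_program, run_program, llm_verify) are hypothetical stand-ins, not the authors' implementation or API.

# Illustrative sketch of the decompose-and-verify loop summarized in the abstract.
# Helper callables are assumed/hypothetical; only the overall flow is conveyed.
from dataclasses import dataclass

@dataclass
class SubClaim:
    predicate: str   # first-order predicate, e.g. "on(cat, sofa)"
    question: str    # natural-language question probing that predicate

def verify_visual_claim(image, claim,
                        decompose_claim, generate_program, run_program, llm_verify):
    """Return True if the claim is judged consistent with the image."""
    # 1. Decompose the claim into a chain of (predicate, question) sub-claims.
    sub_claims = decompose_claim(claim)            # -> list[SubClaim]

    # 2. Answer each sub-question with generated tool-using code.
    #    Shared state lets later programs reuse earlier results, e.g. object
    #    instances grounded once and stored as intermediate variables.
    state, qa_pairs = {}, []
    for sc in sub_claims:
        program = generate_program(sc.question, known_variables=list(state))
        answer, confidence, state = run_program(program, image, state)
        qa_pairs.append((sc.question, answer, confidence))

    # 3. An LLM judges the original claim from the consistency and confidence
    #    of the accumulated (question, answer) pairs.
    return llm_verify(claim, qa_pairs)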