|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:12:42.828759Z" |
|
}, |
|
"title": "Document Retrieval and Claim Verification to Mitigate COVID-19 Misinformation", |
|
"authors": [ |
|
{ |
|
"first": "Megha", |
|
"middle": [], |
|
"last": "Sundriyal", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ganeshan", |
|
"middle": [], |
|
"last": "Malhotra", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "BITS Pilani", |
|
"location": { |
|
"settlement": "Goa", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Shad", |
|
"middle": [], |
|
"last": "Akhtar", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Shubhashis", |
|
"middle": [], |
|
"last": "Sengupta", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Accenture Labs", |
|
"institution": "", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Fano", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Accenture Labs", |
|
"institution": "", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Tanmoy", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Iiit", |
|
"middle": [ |
|
"-" |
|
], |
|
"last": "Delhi", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "During the COVID-19 pandemic, the spread of misinformation on online social media has grown exponentially. Unverified bogus claims on these platforms regularly mislead people, leading them to believe in half-baked truths. The current vogue is to employ manual fact-checkers to verify claims to combat this avalanche of misinformation. However, establishing such claims' veracity is becoming increasingly challenging, partly due to the plethora of information available, which is difficult to process manually. Thus, it becomes imperative to verify claims automatically without human interventions. To cope up with this issue, we propose an automated claim verification solution encompassing two steps-document retrieval and veracity prediction. For the retrieval module, we employ a hybrid search-based system with BM25 as a base retriever and experiment with recent state-of-the-art transformer-based models for re-ranking. Furthermore, we use a BART-based textual entailment architecture to authenticate the retrieved documents in the later step. We report experimental findings, demonstrating that our retrieval module outperforms the best baseline system by 10.32 NDCG@100 points. We escort a demonstration to assess the efficacy and impact of our suggested solution. As a byproduct of this study, we present an open-source, easily deployable, and user-friendly Python API that the community can adopt.",
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "During the COVID-19 pandemic, the spread of misinformation on online social media has grown exponentially. Unverified bogus claims on these platforms regularly mislead people, leading them to believe in half-baked truths. The current vogue is to employ manual fact-checkers to verify claims to combat this avalanche of misinformation. However, establishing such claims' veracity is becoming increasingly challenging, partly due to the plethora of information available, which is difficult to process manually. Thus, it becomes imperative to verify claims automatically without human interventions. To cope up with this issue, we propose an automated claim verification solution encompassing two steps-document retrieval and veracity prediction. For the retrieval module, we employ a hybrid search-based system with BM25 as a base retriever and experiment with recent state-of-the-art transformer-based models for re-ranking. Furthermore, we use a BART-based textual entailment architecture to authenticate the retrieved documents in the later step. We report experimental findings, demonstrating that our retrieval module outperforms the best baseline system by 10.32 NDCG@100 points. We escort a demonstration to assess the efficacy and impact of our suggested solution. As a byproduct of this study, we present an open-source, easily deployable, and user-friendly Python API that the community can adopt.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The escalating drift of online social media platforms has led to a massive rise in online content consumers. Participation in these platforms has swung into another correspondence, which is no longer limited by physical barriers. Because of their speed and focused information, these platforms facilitate the dissemination of personal thoughts and information to a much larger audience. However, at the same time, these platforms have enriched an equally docile environment for malicious users to promulgate fake news, bogus claims, rumors and misinformation. There have been numerous cases where the propagation of malicious unverified content has influenced the entire society. One such concrete example is the 2016 Presidential Elections in the United States, which witnessed the alarming impact of false news, with many citizens swayed by a fraudulent website (Grave et al., 2018) . Allcott and Gentzkow (2017) revealed that nearly 25% of American citizens visited a fake news website that aimed at manipulating the general public's cognitive process and consequently clouted the eventual conclusion of the election. Another recent example is the global pandemic of COVID-19. When the entire world went into lockdown, the virtual world encountered a great closeness transforming social media platforms into the primary conduits for information consumption and dissemination. Consequently, there has been an accretion of 50%-70% in total Internet hits in the year 2020 (Beech, 2020) . Around the same time, enormous social media posts with unverified bogus claims about the pandemic began to arise, frequently spurring life-threatening remedies (Naeem and Bhatti, 2020) . Such claims had an unprecedented impact, resulting in monetary damage and the loss of priceless human lives. A study revealed that at least 800 individuals died worldwide in the first quarter of 2020 due to misinformation about COVID-19 (Coleman, 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 864, |
|
"end": 884, |
|
"text": "(Grave et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 887, |
|
"end": 914, |
|
"text": "Allcott and Gentzkow (2017)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1472, |
|
"end": 1485, |
|
"text": "(Beech, 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1648, |
|
"end": 1672, |
|
"text": "(Naeem and Bhatti, 2020)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1912, |
|
"end": 1927, |
|
"text": "(Coleman, 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Motivation: A slew of such incidents has continued to emerge from the worldwide community in recent years. Thousands of people read these unverified claims online and spread misinformation if the claims' integrity is not corroborated. As a result, a variety of manual fact-checking organizations have evolved to address this concerning issue. Unfortunately, the enormity of misinformation floating around on the Internet has developed into a global infodemic 1 making their efforts untenable. To alleviate this bottleneck, the process of automating fact-checking has recently garnered a lot of consideration in the research world. Vlachos and Riedel (2014) formalized the task of fact-checking and claim verification as a series of components -identifying claims to be evaluated, extracting relevant shreds of evidence, and delivering verdicts. As a result, this facilitated the establishment of automated fact-checking pipelines composed of subcomponents that can be mapped to tasks well-studied in the NLP community. The task of retrieving relevant information has gained a lot of impetus in recent years, especially with the introduction of tools like PYSERINI 2 and BEIR 3 . Furthermore, advancements were made by establishing datasets of either claims acquired from fact-checking websites (Wang, 2017) or datasets curated specifically for research (Thorne et al., 2018a) . The recent release of the CORD-19 dataset 4 , consisting of more than 500,000 articles, has provided access to thousands of scientific articles on the prevention techniques, spread, transmission, and cures of the COVID-19. The dataset consists of more than 500,000 articles.", |
|
"cite_spans": [ |
|
{ |
|
"start": 631, |
|
"end": 656, |
|
"text": "Vlachos and Riedel (2014)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1294, |
|
"end": 1306, |
|
"text": "(Wang, 2017)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1353, |
|
"end": 1375, |
|
"text": "(Thorne et al., 2018a)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "State-of-the-art and Challenges: Previous research in the realm of claim verification and factchecking has primarily concentrated on structured data, often in the form of subject-predicate-object statements (Dong et al., 2015; Nakashole and Mitchell, 2014) . Several research on detecting false claims on social media included network metadata such as user profile characteristics, user-user interactions, popularity attributes based on the number of likes or followers, etc (Kumar et al., 2016; Qazvinian et al., 2011) . Most notably, all of these procedures use black-box approaches, and hence, do not articulate why a statement is considered verified. Another pressing issue is that the input claim does not coexist naturally with the corresponding review articles. As a result, obtaining the relevant articles via internet is critical. There is, however, a disparity between the human-crafted review articles generated specifically for claim verification in the fact database and the report articles gathered from the web. Meanwhile, methods such as ClaimBuster 5 and Google's Fact Check Explorer 6 have been developed to check the legitimacy of the statement by assessing trust criteria utilizing internet. However, these existing methods are not intended to investigate the veracity of the evidence and hence fail to meet the previously identified issues.", |
|
"cite_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 226, |
|
"text": "(Dong et al., 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 227, |
|
"end": 256, |
|
"text": "Nakashole and Mitchell, 2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 475, |
|
"end": 495, |
|
"text": "(Kumar et al., 2016;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 519, |
|
"text": "Qazvinian et al., 2011)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our Contributions: To address the aforementioned issues, we create an end-to-end claim verification system capable of establishing the integrity of a query claim and explaining its decisions with supporting evidence. Our model takes in as input the claim whose veracity is to be verified. Due to the diversity of natural language idioms, the first major problem in developing such a system is identifying connected snippets of a claim. Thus, we utilize well-known retrieval systems for this task. The system selects relevant articles from either the CORD-19 dataset or our in-house dataset, ClaVer, using a host of different models ranging from BM25 to intricate hybrid searchers. Users can additionally opt to retrieve more fine-grained results where the model selects relevant snippets in the article. Eventually, the model verifies the claim by calculating the entailment of the input claim concerning the retrieved articles.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Through this work, we make the following contributions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. To allay the unavailability of a COVID-19 centric annotated dataset for claim verification in Twitter, we develop ClaVer, a new dataset of claim-evidence pairs based on a subset of COVID-19-related claims reaped from a recently released large-scale claim-detection dataset, LESA (Gupta et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 302, |
|
"text": "(Gupta et al., 2021)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2. We propose an end-to-end claim verification system encompassing two steps to validate the claims proffered online provided high-quality editorial review articles and Twitter posts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "3. We evaluate our retrieval model against multiple state-of-the-art systems concerning our dataset, ClaVer. According to the comparison, BM25 surpasses all other existing systems by a wide margin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We provide an open-source, easily deployable, and user-friendly Python API based on our proposed solution for claim verification. We also accompany a demonstration to evaluate the efficacy and usage of the API.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The challenge of verifying claims on online social media has garnered considerable attention in the last several years. Initially, the task of automatic claim verification and fact-checking were investigated in the context of computational journalism (Cohen et al., 2011; Flew et al., 2012) , and journalists and professional fact-debunkers manually verified claims utilizing various information sources. However, that was not just time-consuming but also introduced substantial human bias in it. The recent advancement in NLP and information retrieval (IR) has equipped journalists and online social media users with tools enabling automatic claim verification. In the past few years, plenty of work has been proposed to fact-check online claims. Vlachos and Riedel (2014) presented the initial pioneering work in this domain. They published the first claim verification dataset, which included 106 statements taken from fact-checking websites like PolitiFact. However, they lacked justification for the verdict, which verification systems typically require. To address this issue, Wang (2017) prolonged this approach by introducing 12.8K claims from PolitiFact along with their explanations. The Fact Extraction and Verification (FEVER) shared task was launched to advance research in this direction (Thorne et al., 2018b) . The organizers of the FEVER shared task constructed a large-scale dataset of 185445 claims based on Wikipedia articles, each of which comes with several evidence sets. Traditionally, the existing claim verification systems primarily rely on textual content and/or social context. The content-based methods essentially acquire the n-grams (Wang, 2017) , semantics (Khattar et al., 2019) , writing styles (Gr\u00f6ndahl and Asokan, 2019) , etc. Besides textual-content, auxiliary knowledge around social-context has also been extensively examined for verification tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 251, |
|
"end": 271, |
|
"text": "(Cohen et al., 2011;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 290, |
|
"text": "Flew et al., 2012)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 748, |
|
"end": 773, |
|
"text": "Vlachos and Riedel (2014)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1302, |
|
"end": 1324, |
|
"text": "(Thorne et al., 2018b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1665, |
|
"end": 1677, |
|
"text": "(Wang, 2017)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1690, |
|
"end": 1712, |
|
"text": "(Khattar et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1730, |
|
"end": 1757, |
|
"text": "(Gr\u00f6ndahl and Asokan, 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "These context-based methods emphasize collecting user profile-based (Shu et al., 2019) , propagation structure-based , source-based (Pennycook and Rand, 2019), etc. Zhi et al. (2017) introduced ClaimVerif that provides a credibility score for a user given a claim and also gives supporting evidences that justify the credibility score. Hanselowski et al. (2018) presented their approach to the FEVER task (Thorne et al., 2018b) which was introduced to expedite the development of fact verification systems, in which they used entity linking for document retrieval and Enhanced Sequential Inference Model for determining the entailment. Ma et al. (2019) used Hierarchical Attention Networks with sentence-level evidence embeddings. Despite the fact that these tactics produce good performance results, it is challenging for these approaches to provide adequate reasons for claim verification outcomes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 86, |
|
"text": "(Shu et al., 2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 146, |
|
"text": "(Pennycook and", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 182, |
|
"text": "Rand, 2019), etc. Zhi et al. (2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 361, |
|
"text": "Hanselowski et al. (2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 427, |
|
"text": "(Thorne et al., 2018b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 636, |
|
"end": 652, |
|
"text": "Ma et al. (2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As a result, current research has focused on interpretable claim verification, which develops interactive models to examine the distinction. Attentionbased interaction models (Popat et al., 2018) , gate fusion interactive models (Wu and Rao, 2020) , coherence modelling interactive models (Ma et al., 2019) , and graph-aware interaction models are among the interactive models. The granularity of captured semantic conflicts involves word-level (Popat et al., 2018) , sentence-level (Ma et al., 2019) , and multi-feature (Wu and Rao, 2020) conflicts. Su et al. (2020) came up with a question-answeringbased model that mines relevant articles from the CORD-19 dataset and summarizes them to answer pressing questions about the COVID-19 pandemic. Recently, proposed a T5 7 transformer-based architecture for abstract retrieval, sentence selection and label prediction and perform claim verification. Similar to us, they also utilized the CORD-19 (Wang et al., 2020) corpus as the knowledge base to retrieve shreds of evidences. These methods, which employ semantic conflicts to verify claims, reflect a certain degree of interpretability. But not all conflicts can be used as valid evidence to reasonably explain the results, and they also include considerable conflicts unrelated to claims or even interfere with the verified results. It is difficult for automatic claim verification to provide reasonable explanations for the Claim: 1 @CNN Boosting our immune systems will help deter the virus. It's our only defense aside from n95 masks and goggles", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 195, |
|
"text": "(Popat et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 247, |
|
"text": "(Wu and Rao, 2020)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 289, |
|
"end": 306, |
|
"text": "(Ma et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 465, |
|
"text": "(Popat et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 483, |
|
"end": 500, |
|
"text": "(Ma et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 944, |
|
"end": 963, |
|
"text": "(Wang et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Label First, there's the not-so-great news. Despite claims you may have seen on the Internet, there's no magic food or pill that is guaranteed to boost your immune system and protect you against coronavirus...There are ways to keep your immune system functioning optimally, which can help to keep you healthy and give you a sense of control in an uncertain time...For a starter dose of immune-boosting vitamins, minerals and antioxidants, fill half of your plate with vegetables and fruits.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evidence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Claim: 2 @AFP @EvelDick It's much more than a coincidence that China has a bioweapons lab with sloppy protocols in Wuhan. Wonder if this is another booboo? Seems like a very bad place to have a bioweapons lab. The whole \"this came from snakes\" Chinese party line makes me think the virus was manufactured. Evidence Label As the Covid-19 pandemic continues its destructive course, two theories are being widely aired...The lab is one of 20 such facilities under the Chinese Academy of Sciences, but is the only one dealing with virology. Fully compliant with ISO standards, the Wuhan facility interacts regularly with a host of outside experts. Like other labs, its aim is to protect populations against new viruses...", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SUPPORTED", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "; the demand for interpretable claim verification is growing, with the goal of providing end-users with grounds to debunk rumours by showing the incorrect elements of claims. Existing methods in this assignment investigate semantic conflicts between claims and relevant articles by creating various interactive models to explain verification results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "REFUTED verification results", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For our experiments, we adopt two datasets. Their details are shown as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description of the Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "1. CORD-19 Dataset (Wang et al., 2020) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 38, |
|
"text": "(Wang et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description of the Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "CORD-19 dataset consists of over \u223c 500, 000 articles (over \u223c 200, 000 containing full text) taken from various scientific publications about COVID-19, SARS-COV2 and other viruses. This dataset provides access to trustworthy scientific sources of information to mitigate the spread of misinformation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description of the Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "2. LESA Dataset (Gupta et al., 2021) : LESA dataset consists of \u223c 10, 000 tweets that were mined from various sources and were manually annotated for the binary classification task of claim detection. Furthermore, we develop a validation set -Claim Verification (ClaVer) by selecting a subset of claims from the LESA dataset and annotating those claims with relevant articles that provide additional context for the claim, as shown in Table 1 . These articles are gathered from reliable online news sources and contain additional extensive information that may be used to verify the authenticity of the claim. The articles can \"Refute\" or \"Support\" the claim. In other circumstances, the claim may be that the annotated article does not give conclusive evidence. These articles lack sufficient information to support or reject the claim's veracity and hence labelled for \"Not Enough Information\". These articles are also stored in our global knowledge base of articles along with the articles taken from the CORD-19 dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 36, |
|
"text": "(Gupta et al., 2021)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 435, |
|
"end": 442, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Description of the Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Adhering to the standard of automated claim verification and fact-checking systems (Thorne et al., 2018b) , our proposed approach also consists of a two-step pipeline -Document Retrieval and Veracity Prediction. In this section, we present the techniques employed for retrieval and veracity prediction components. Besides the current approach, we had also employ alternative techniques using Rapid Automatic Keyword Extraction or RAKE (Rose et al., 2010) and SciSpacy (Neumann et al., 2019) for keyword extraction and searching our corpus using the extracted keywords. Figure 1 Figure 1 : An overview of the proposed evidence-based claim verification pipeline. The significant components have been highlighted to correspond to the two stages of our experimental setup: (a) a document retrieval module that uses one of the given datasets to retrieve top-k relevant documents for the corresponding input claim, and (b) a veracity prediction module that seeks to establish the retrieved documents' credibility against the input claim.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 105, |
|
"text": "(Thorne et al., 2018b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 454, |
|
"text": "(Rose et al., 2010)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 468, |
|
"end": 490, |
|
"text": "(Neumann et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 569, |
|
"end": 577, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 578, |
|
"end": 586, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Our Approach", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "entailment decision for the claim with respect to the retrieved evidences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Approach", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Inspired by IR systems, the retrieval problem we attempt to address is defined as follows: Given a textual claim c and a set of documents D, we aim to retrieve the top-k documents from D relevant to c. Our retrieval pipeline consists of two broad categories of retrieval systems, namely Sparse Retrieval and Dense Retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Document Retrieval", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "1. Sparse Retrieval Model: Over the years, lexical approaches like TF-IDF and BM25 have dominated textual information retrieval. We also utilize the BM25 scoring function (Robertson et al., 1995) as the backbone model for sparse retrieval. We use the sparse retrievers for both the ClaVer as well as CORD-19 datasets. In this case, we also provide an extra option of getting finer-grained results. This step scans through the retrieved article and provides a relevant part of the article. We use a BioBERT (Lee et al., 2019) language model which is pre-trained on largescale bio-medical corpora. We compute the hidden representation of each paragraph in the article using the language model and calculate its cosine similarity with the hidden representation of the claim. The paragraph with the highest value is then selected.", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 195, |
|
"text": "(Robertson et al., 1995)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 524, |
|
"text": "(Lee et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Document Retrieval", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "2. Dense and Hybrid Retrieval Models: More recently, dense retrieval approaches were pro-posed to get better retrieval results. They are capable of capturing semantic matches and try to overcome the (potential) lexical gap. Dense retrievers map queries and documents in a shared, dense vector space (Gillick et al., 2018) . This allowed the document representation to be pre-computed and indexed. We provide the option of dense retrievers specifically for our ClaVer dataset. Using dense indexes for CORD-19 dataset is difficult because of the huge size of the corpora. To use the dense and hybrid searchers, we first index our ClaVer data using the FAISS (Johnson et al., 2017) library. For our dense retriever, we use the simple dense searcher provided by the PYSERINI library while initializing it with COVID-BERT weights. The hybrid searcher uses a combination of sparse and dense retrievers and computes a weighted interpolation of the individual results to arrive at the final rankings. We use the TCT-ColBERT (Lin et al., 2020) architecture to encode our queries into the same representation space as the encoded documents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 299, |
|
"end": 321, |
|
"text": "(Gillick et al., 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 656, |
|
"end": 678, |
|
"text": "(Johnson et al., 2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1016, |
|
"end": 1034, |
|
"text": "(Lin et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Document Retrieval", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Given a claim and the evidence gathered through document retrieval system, veracity prediction module seeks to establish the evidence's credibility in terms of a veracity score. To verify the veracity of our retrieved articles, we leverage a BART-based Table 2 : Sample response generated by our proposed system leveraging ClaVer dataset for extraction.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 260, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Veracity Prediction", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Story about how #HydroxyChloroquine likely help people recover from #Coronavirus. IMO, it was never touted as the cure but as option for treatment doctors should consider and it appears to work in some cases....39 in one place.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Claim", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Evidence Retrieved 8 Label Veracity Ours", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Outputs Technique", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Chloroquine and hydroxychloroquine, a pair of old drugs used to treat and prevent malaria, are the latest compounds to be thrust into the limelight as people tout them as treatments for the novel coronavirus. On Sunday, March 29, the US Department of Health and Human Services accepted 30 million doses of hydroxychloroquine sulfate from Novartis and 1 million doses of chloroquine phosphate from Bayer...The World Health Organization is sponsoring a large international clinical trial called SOLIDARITY to study six drugs that could be rapidly deployed for the fight the coronavirus, including chloroquine and hydroxychloroquine.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Outputs Technique", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As of now, no study says coronavirus can be cured by drinking lots of water or gargling with warm saltwater. Though it is true that warm salt water has long been used as a home remedy to soothe a sore throat, but till now, there is no evidence that it can also ward off the novel coronavirus. A report by factcheck website \"Snopes\" also says that there is no proof that coronavirus remains in the throat for four days as mentioned in the viral post.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As of now, no study says coronavirus can be cured by drinking lots of water or gargling with warm saltwater. Though it is true that warm salt water has long been used as a home remedy to soothe a sore throat, but till now, there is no evidence that it can also ward off the novel coronavirus. A report by factcheck website \"Snopes\" also says that there is no proof that coronavirus remains in the throat for four days as mentioned in the viral post. NEUTRAL 0.99825 (Lewis et al., 2020) Natural Language Inference (NLI) model that returns one of the three classes for each claim-evidence pair: Entailment, Neutral and Contradiction (as shown in Table 2 ). The mapping of these labels with our use case is done in the following way:", |
|
"cite_spans": [ |
|
{ |
|
"start": 466, |
|
"end": 486, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 645, |
|
"end": 652, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hybrid", |
|
"sec_num": "0.99825" |
|
}, |
|
{ |
|
"text": "\u2022 If the model outputs 'Entailment', it means that the given claim's veracity can be positively supported by the retrieved article.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid", |
|
"sec_num": "0.99825" |
|
}, |
|
{ |
|
"text": "\u2022 If the model outputs 'Contradiction', it means that the given claim's veracity is refuted by the retrieved article which makes the claim dubious.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid", |
|
"sec_num": "0.99825" |
|
}, |
|
{ |
|
"text": "\u2022 If the model outputs 'Neutral', it means the retrieved article does not provide enough evidence to either support or refute the claim.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid", |
|
"sec_num": "0.99825" |
|
}, |
|
{ |
|
"text": "We compare the findings of our retrieval system BM25 to those of other existing systems. We employ a collection of claims and ground-truth labels from our ClaVer dataset for quantitative evaluation. The test data set consists of claims excluded from the knowledge base in the retrieval phase. For this, we develop a manually annotated dataset with \u223c 1000 claims obtained from Twitter and build a knowledge-base of \u223c 400 articles from reliable sources, equipping a testing ground to validate the results. Table 3 presents experimental results based on Normalized Discounted Cumulative Gain (NDCG@k) scores, Mean Average Precision (MAP@k) and Mean Average Recall (MAR@k) scores for different values of k. We find that using BM25 outperforms all other baseline systems for retrieval task. The NDCG@100 score of the BM25 Link of the Document: https://oypost.com/2020/01/29/no-corona virus-isnt-linked-to-corona-beer-cantbe-cured-with-bleach/ Despite the catchy name, no,.\u2026", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 504, |
|
"end": 511, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The article belongs to neutral category with a confidence of 0.9982", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Link of the Document: https://oypost.com/2020/01/29/no-corona virus-isnt-linked-to-corona-beer-cantbe-cured-with-bleach/ Despite the catchy name, no, coronavirus has nothing to do...", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The article belongs to neutral category with a confidence of 0.9982", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Link of the Document: https://www.who.int/news-room9-3-detail/ herd-immunitylockdowns-andcovid-19", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Herd immunity, also known as 'population immunity', is the indirect protection from an infectious d\u2026", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The article belongs to neutral category with a confidence of 0.9903", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Dense Search CLAVER Hybrid Search", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CORD-19 CLAVER", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Figure 2: User-interface of our proposed tool after the claim has been submitted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BM25 BM25", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "retrieval model improves the baseline method by more than 10% out of the whole testing set. We find that BM25 detects relevant snippets with higher precision and recall than other existing retrieval systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BM25 BM25", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, we demonstrate how our proposed claim verification pipeline works. Figure 2 depicts an example claim as well as the model's output results. Users enter a claim into our system as a query, and the system evaluates whether or not it is a validated claim. In practice, the system takes somewhere around 20 and 80 seconds to execute a single user query, depending on the number and length of articles obtained by the search engine. The input section of our tool, as shown in Figure 2 , provides a query text box where the user can enter any natural language text as an input claim for evaluation, as well as a specific configuration to limit the number of articles to be retrieved. Following the submission of the claim, the tool's back-end server does its analysis. It returns three sets of outputs: (i) a set of articles employing the various approaches, (ii) a claim category, and (iii) a veracity score. The output also presents the technique utilized for retrieval (pink) and from which knowledge base the shreds of evidence were extracted (blue). The most intriguing aspect of the system is that it links resources from the web, where the article was retrieved, allowing individuals to make their own decisions based on them.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 92, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 497, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Demonstration", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Not all information is equally reliable, and sometimes even the trusted sources contradict one another. This calls into question the assumptions behind most current fact-checking research, which relies on a single authoritative source. As a result, we offer results for a common claim from several models and knowledge bases. For demonstration, we practice the widely spread claim \"Vaccines are not effective against COVID-19\" as an input as shown in Figure 2 , and the tool returned the top-ranked shreds of evidence. The first two pieces of evidence come from the BM25 model, which was run on the CORD-19 dataset and our data, respectively. Furthermore, evidences 3 and 4 collected articles from our dataset using a dense and hybrid retrieval strategy, respectively. We can see that all four pieces of evidence assigned the same label to the claim, but their truthfulness scores differed from each other.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 451, |
|
"end": 459, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Demonstration", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this work, we verged upon claim verification on online social media towards coping with misinformation. We bestowed a claim verification system that evaluates the authenticity of a user-supplied query claim and justifies the verdict with corroborating evidence. We explored multiple retrieval methodologies and published user research findings, demonstrating the utility of the BM25 method. Unlike other tools, our system learns the distributed representations to encapsulate the semantic relations between the claim and the evidence. Our approach uses a two-step training process to provide a high-quality veracity score as well as best-suited articles, leveraging data from formal articles and web-based informal texts. We have made the source codes and the dataset public at the following link: https://github.com/ LCS2-IIITD/claim_verification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://www.who.int/health-topics/ infodemic 2 https://github.com/castorini/pyserini 3 https://github.com/UKPLab/beir 4 https://allenai.org/data/cord-19", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://idir.uta.edu/claimbuster/api/ 6 https://toolbox.google.com/factcheck/ explorer", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://huggingface.co/transformers/ model_doc/t5.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Links of article sources can be found at: https:// cutt.ly/lFwsxXa", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "T. Chakraborty would like to acknowledge the support of the Ramanujan Fellowship, and ihub-Anubhuti-iiitd Foundation set up under the NM-ICPS scheme of the Department of Science and Technology, India. M. S. Akhtar and T. Chakraborty thank Infosys Centre for AI at IIIT-Delhi for the valuable support.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Social media and fake news in the 2016 election", |
|
"authors": [ |
|
{ |
|
"first": "Hunt", |
|
"middle": [], |
|
"last": "Allcott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Gentzkow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Journal of economic perspectives", |
|
"volume": "31", |
|
"issue": "2", |
|
"pages": "211--247", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hunt Allcott and Matthew Gentzkow. 2017. Social media and fake news in the 2016 election. Journal of economic perspectives, 31(2):211-36.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Covid-19 pushes up internet use 70% and streaming more than 12%", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Beech", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Beech. 2020. Covid-19 pushes up internet use 70% and streaming more than 12%, first figures re- veal.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Computation-al journalism: A call to arms to database research-ers", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengkai", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "5th Biennial Conference on Innovative Data Systems Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Cohen, Chengkai Li, Jun Yang, and Cong Yu. 2011. Computation-al journalism: A call to arms to database research-ers. In 5th Biennial Conference on Innovative Data Systems Research, ACM.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "2020. 'hundreds dead' because of covid-19 misinformation", |
|
"authors": [ |
|
{ |
|
"first": "Alistair", |
|
"middle": [], |
|
"last": "Coleman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alistair Coleman. 2020. 'hundreds dead' because of covid-19 misinformation.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Knowledge-based trust: Estimating the trustworthiness of web sources", |
|
"authors": [ |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Xin Luna Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Van", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wilko", |
|
"middle": [], |
|
"last": "Dang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Camillo", |
|
"middle": [], |
|
"last": "Horn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaohua", |
|
"middle": [], |
|
"last": "Lugaresi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Luna Dong, Evgeniy Gabrilovich, Kevin Murphy, Van Dang, Wilko Horn, Camillo Lugaresi, Shaohua Sun, and Wei Zhang. 2015. Knowledge-based trust: Estimating the trustworthiness of web sources.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The promise of computational journalism", |
|
"authors": [ |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Flew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christina", |
|
"middle": [], |
|
"last": "Spurgeon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Daniel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Swift", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Journalism practice", |
|
"volume": "6", |
|
"issue": "2", |
|
"pages": "157--171", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terry Flew, Christina Spurgeon, Anna Daniel, and Adam Swift. 2012. The promise of computational journalism. Journalism practice, 6(2):157-171.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "End-to-end retrieval in continuous space", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gillick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Presta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav Singh", |
|
"middle": [], |
|
"last": "Tomar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1811.08008" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Gillick, Alessandro Presta, and Gaurav Singh Tomar. 2018. End-to-end retrieval in continuous space. arXiv preprint arXiv:1811.08008.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Learning word vectors for 157 languages", |
|
"authors": [ |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prakhar", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.06893" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edouard Grave, Piotr Bojanowski, Prakhar Gupta, Ar- mand Joulin, and Tomas Mikolov. 2018. Learn- ing word vectors for 157 languages. arXiv preprint arXiv:1802.06893.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Text analysis in adversarial settings: Does deception leave a stylistic trace", |
|
"authors": [ |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Gr\u00f6ndahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Asokan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACM Computing Surveys (CSUR)", |
|
"volume": "52", |
|
"issue": "3", |
|
"pages": "1--36", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tommi Gr\u00f6ndahl and N Asokan. 2019. Text analysis in adversarial settings: Does deception leave a stylistic trace? ACM Computing Surveys (CSUR), 52(3):1- 36.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Lesa: Linguistic encapsulation and semantic amalgamation based generalised claim detection from online content", |
|
"authors": [ |
|
{ |
|
"first": "Shreya", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parantak", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Megha", |
|
"middle": [], |
|
"last": "Sundriyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shad", |
|
"middle": [], |
|
"last": "Md", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanmoy", |
|
"middle": [], |
|
"last": "Akhtar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 16th Conference of the EACL: Main Volume", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3178--3188", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shreya Gupta, Parantak Singh, Megha Sundriyal, Md Shad Akhtar, and Tanmoy Chakraborty. 2021. Lesa: Linguistic encapsulation and semantic amalga- mation based generalised claim detection from online content. In Proceedings of the 16th Conference of the EACL: Main Volume, pages 3178-3188.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "UKP-athene: Multi-sentence textual entailment for claim verification", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Hanselowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zile", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniil", |
|
"middle": [], |
|
"last": "Sorokin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Schiller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claudia", |
|
"middle": [], |
|
"last": "Schulz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the First Workshop on Fact Extraction and VERification (FEVER)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas Hanselowski, Hao Zhang, Zile Li, Daniil Sorokin, Benjamin Schiller, Claudia Schulz, and Iryna Gurevych. 2018. UKP-athene: Multi-sentence textual entailment for claim verification. In Proceed- ings of the First Workshop on Fact Extraction and VERification (FEVER), Brussels, Belgium. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Billion-scale similarity search with gpus", |
|
"authors": [ |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthijs", |
|
"middle": [], |
|
"last": "Douze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herv\u00e9", |
|
"middle": [], |
|
"last": "J\u00e9gou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1702.08734" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeff Johnson, Matthijs Douze, and Herv\u00e9 J\u00e9gou. 2017. Billion-scale similarity search with gpus. arXiv preprint arXiv:1702.08734.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Mvae: Multimodal variational autoencoder for fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Khattar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Singh Goud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasudeva", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Varma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The world wide web conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2915--2921", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dhruv Khattar, Jaipal Singh Goud, Manish Gupta, and Vasudeva Varma. 2019. Mvae: Multimodal varia- tional autoencoder for fake news detection. In The world wide web conference, pages 2915-2921.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Disinformation on the web: Impact, characteristics, and detection of wikipedia hoaxes", |
|
"authors": [ |
|
{ |
|
"first": "Srijan", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "West", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jure", |
|
"middle": [], |
|
"last": "Leskovec", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 25th international conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "591--602", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Srijan Kumar, Robert West, and Jure Leskovec. 2016. Disinformation on the web: Impact, characteristics, and detection of wikipedia hoaxes. In Proceedings of the 25th international conference on World Wide Web, pages 591-602.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "BioBERT: a pre-trained biomedical language representation model for biomedical text mining", |
|
"authors": [ |
|
{ |
|
"first": "Jinhyuk", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonjin", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungdong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghyeon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunkyu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chan", |
|
"middle": [], |
|
"last": "Ho So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Bioinformatics", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "1234--1240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2019. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics, 36(4):1234-1240.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelrahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the ACL, Online. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and com- prehension. In Proceedings of the 58th Annual Meet- ing of the ACL, Online. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Pyserini: An easy-to-use python toolkit to support replicable ir research with sparse and dense representations", |
|
"authors": [ |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xueguang", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng-Chieh", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jheng-Hong", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ronak", |
|
"middle": [], |
|
"last": "Pradeep", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jimmy Lin, Xueguang Ma, Sheng-Chieh Lin, Jheng- Hong Yang, Ronak Pradeep, and Rodrigo Nogueira. 2021. Pyserini: An easy-to-use python toolkit to support replicable ir research with sparse and dense representations.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Distilling dense representations for ranking using tightly-coupled teachers", |
|
"authors": [ |
|
{ |
|
"first": "Jheng-Hong", |
|
"middle": [], |
|
"last": "Sheng-Chieh Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sheng-Chieh Lin, Jheng-Hong Yang, and Jimmy Lin. 2020. Distilling dense representations for ranking using tightly-coupled teachers.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Sentence-level evidence embedding for claim verification with hierarchical attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shafiq", |
|
"middle": [], |
|
"last": "Joty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kam-Fai", |
|
"middle": [], |
|
"last": "Wong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jing Ma, Wei Gao, Shafiq Joty, and Kam-Fai Wong. 2019. Sentence-level evidence embedding for claim verification with hierarchical attention networks. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The covid-19 'infodemic': a new front for information professionals", |
|
"authors": [ |
|
{ |
|
"first": "Salman", |
|
"middle": [], |
|
"last": "Bin Naeem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rubina", |
|
"middle": [], |
|
"last": "Bhatti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Health Information & Libraries Journal", |
|
"volume": "37", |
|
"issue": "3", |
|
"pages": "233--239", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Salman Bin Naeem and Rubina Bhatti. 2020. The covid- 19 'infodemic': a new front for information profes- sionals. Health Information & Libraries Journal, 37(3):233-239.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Language-aware truth assessment of fact candidates", |
|
"authors": [ |
|
{ |
|
"first": "Ndapandula", |
|
"middle": [], |
|
"last": "Nakashole", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the ACL", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1009--1019", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ndapandula Nakashole and Tom Mitchell. 2014. Language-aware truth assessment of fact candidates. In Proceedings of the 52nd Annual Meeting of the ACL (Volume 1: Long Papers), pages 1009-1019.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "ScispaCy: Fast and Robust Models for Biomedical Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "King", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Waleed", |
|
"middle": [], |
|
"last": "Ammar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 18th BioNLP Workshop and Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "319--327", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5034" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Neumann, Daniel King, Iz Beltagy, and Waleed Ammar. 2019. ScispaCy: Fast and Robust Models for Biomedical Natural Language Processing. In Pro- ceedings of the 18th BioNLP Workshop and Shared Task, pages 319-327, Florence, Italy. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Fighting misinformation on social media using crowdsourced judgments of news source quality", |
|
"authors": [ |
|
{ |
|
"first": "Gordon", |
|
"middle": [], |
|
"last": "Pennycook", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Rand", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the National Academy of Sciences", |
|
"volume": "116", |
|
"issue": "", |
|
"pages": "2521--2526", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gordon Pennycook and David G Rand. 2019. Fighting misinformation on social media using crowdsourced judgments of news source quality. Proceedings of the National Academy of Sciences, 116(7):2521-2526.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Declare: Debunking fake news and false claims using evidence-aware deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Kashyap", |
|
"middle": [], |
|
"last": "Popat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Subhabrata", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Yates", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1809.06416" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kashyap Popat, Subhabrata Mukherjee, Andrew Yates, and Gerhard Weikum. 2018. Declare: Debunking fake news and false claims using evidence-aware deep learning. arXiv preprint arXiv:1809.06416.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Scientific claim verification with VerT5erini", |
|
"authors": [ |
|
{ |
|
"first": "Ronak", |
|
"middle": [], |
|
"last": "Pradeep", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xueguang", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 12th International Workshop on Health Text Mining and Information Analysis, online. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronak Pradeep, Xueguang Ma, Rodrigo Nogueira, and Jimmy Lin. 2021. Scientific claim verification with VerT5erini. In Proceedings of the 12th International Workshop on Health Text Mining and Information Analysis, online. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Rumor has it: Identifying misinformation in microblogs", |
|
"authors": [ |
|
{ |
|
"first": "Vahed", |
|
"middle": [], |
|
"last": "Qazvinian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Rosengren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [], |
|
"last": "Radev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiaozhu", |
|
"middle": [], |
|
"last": "Mei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1589--1599", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vahed Qazvinian, Emily Rosengren, Dragomir Radev, and Qiaozhu Mei. 2011. Rumor has it: Identifying misinformation in microblogs. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 1589-1599.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Okapi at trec-3", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Robertson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Susan", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Micheline", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Hancock-Beaulieu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Gatford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Nist Special Publication Sp", |
|
"volume": "109", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen E Robertson, Steve Walker, Susan Jones, Micheline M Hancock-Beaulieu, Mike Gatford, et al. 1995. Okapi at trec-3. Nist Special Publication Sp, 109:109.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Automatic Keyword Extraction from Individual Documents, chapter 1", |
|
"authors": [ |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "Rose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dave", |
|
"middle": [], |
|
"last": "Engel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Cramer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wendy", |
|
"middle": [], |
|
"last": "Cowley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stuart Rose, Dave Engel, Nick Cramer, and Wendy Cowley. 2010. Automatic Keyword Extraction from Individual Documents, chapter 1. John Wiley Sons, Ltd.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Beyond news contents: The role of social context for fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Shu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suhang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the twelfth ACM international conference on web search and data mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "312--320", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai Shu, Suhang Wang, and Huan Liu. 2019. Beyond news contents: The role of social context for fake news detection. In Proceedings of the twelfth ACM international conference on web search and data mining, pages 312-320.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Cairecovid: A question answering and query-focused multi-document summarization system for covid-19 scholarly information management", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tiezheng", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Farhad", |
|
"middle": [], |
|
"last": "Bin Siddique", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elham", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Barezi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascale", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Su, Yan Xu, Tiezheng Yu, Farhad Bin Siddique, Elham J. Barezi, and Pascale Fung. 2020. Caire- covid: A question answering and query-focused multi-document summarization system for covid-19 scholarly information management.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Fever: a large-scale dataset for fact extraction and verification", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Thorne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Vlachos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christos", |
|
"middle": [], |
|
"last": "Christodoulopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arpit", |
|
"middle": [], |
|
"last": "Mittal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1803.05355" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018a. Fever: a large-scale dataset for fact extraction and verification. arXiv preprint arXiv:1803.05355.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "The fact extraction and VERification (FEVER) shared task", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Thorne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Vlachos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oana", |
|
"middle": [], |
|
"last": "Cocarascu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christos", |
|
"middle": [], |
|
"last": "Christodoulopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arpit", |
|
"middle": [], |
|
"last": "Mittal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the First Workshop on Fact Extraction and VERification (FEVER)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Thorne, Andreas Vlachos, Oana Cocarascu, Christos Christodoulopoulos, and Arpit Mittal. 2018b. The fact extraction and VERification (FEVER) shared task. In Proceedings of the First Workshop on Fact Extraction and VERification (FEVER), Brussels, Belgium. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Fact checking: Task definition and dataset construction", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Vlachos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the ACL 2014 Workshop on Language Technologies and Computational Social Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas Vlachos and Sebastian Riedel. 2014. Fact checking: Task definition and dataset construction. In Proceedings of the ACL 2014 Workshop on Lan- guage Technologies and Computational Social Sci- ence, Baltimore, MD, USA. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "liar, liar pants on fire\": A new benchmark dataset for fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the ACL", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Yang Wang. 2017. \"liar, liar pants on fire\": A new benchmark dataset for fake news detection. In Proceedings of the 55th Annual Meeting of the ACL (Volume 2: Short Papers), Vancouver, Canada. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Modeling conversation structure and temporal dynamics for jointly predicting rumor stance and veracity", |
|
"authors": [ |
|
{ |
|
"first": "Penghui", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenji", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.08211" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Penghui Wei, Nan Xu, and Wenji Mao. 2019. Modeling conversation structure and temporal dynamics for jointly predicting rumor stance and veracity. arXiv preprint arXiv:1909.08211.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Adaptive interaction fusion networks for fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "Lianwei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.10009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lianwei Wu and Yuan Rao. 2020. Adaptive interac- tion fusion networks for fake news detection. arXiv preprint arXiv:2004.10009.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Claimverif: a real-time claim verification system using the web and fact databases", |
|
"authors": [ |
|
{ |
|
"first": "Shi", |
|
"middle": [], |
|
"last": "Zhi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yicheng", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiayi", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 ACM on Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2555--2558", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shi Zhi, Yicheng Sun, Jiayi Liu, Chao Zhang, and Jiawei Han. 2017. Claimverif: a real-time claim verification system using the web and fact databases. In Proceed- ings of the 2017 ACM on Conference on Information and Knowledge Management, pages 2555-2558.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"text": "Examples from ClaVer dataset along with the evidence and corresponding labels. 8", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF2": { |
|
"text": "Performance of various retrieval techniques on ClaVer dataset. (NDCG: Normalized Discounted Cumulative Gain, MAP: Mean Average Precision and MAR: Mean Average Recall)", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>Technique Ours</td><td colspan=\"7\">NDCG@1 NDCG@10 NDCG@100 MAP@1 MAP@10 MAR@1 MAR@10 24.71 36.75 45.73 24.71 32.14 24.71 51.72</td></tr><tr><td>CrossEncoder MS Marco</td><td>22.99</td><td>35.41</td><td>35.41</td><td>22.99</td><td>31.12</td><td>22.99</td><td>48.85</td></tr><tr><td>CrossEncoder CovidBERT</td><td>3.41</td><td>15.04</td><td>15.04</td><td>3.41</td><td>3.41</td><td>3.49</td><td>36.36</td></tr><tr><td>SentenceBERT MS Marco</td><td>18.97</td><td>32.09</td><td>32.58</td><td>18.97</td><td>26.83</td><td>18.97</td><td>49.43</td></tr><tr><td colspan=\"2\">Vaccines are not effective against COVID-19</td><td/><td/><td/><td/><td/><td>Submit</td></tr><tr><td>Evidence 1</td><td/><td/><td>Evidence 2</td><td/><td/><td/><td/></tr><tr><td colspan=\"3\">Link of the Document: https://doi.org10.108021645515 2020.</td><td/><td/><td/><td/><td/></tr><tr><td>1735227..</td><td/><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"3\">COVID-19, an emerging coronavirus infection advances and</td><td/><td/><td/><td/><td/></tr><tr><td colspan=\"3\">prospects in designing and developing vaccines, immuno...</td><td/><td/><td/><td/><td/></tr><tr><td colspan=\"3\">The article belongs to neutral category with a confidence of 0.9328</td><td/><td/><td/><td/><td/></tr><tr><td>Evidence 3</td><td/><td/><td>Evidence 4</td><td/><td/><td/><td/></tr></table>" |
|
} |
|
} |
|
} |
|
} |