|
{ |
|
"paper_id": "Q17-1027", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:12:24.030075Z" |
|
}, |
|
"title": "Ordinal Common-sense Inference", |
|
"authors": [ |
|
{ |
|
"first": "Sheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Duh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Humans have the capacity to draw commonsense inferences from natural language: various things that are likely but not certain to hold based on established discourse, and are rarely stated explicitly. We propose an evaluation of automated common-sense inference based on an extension of recognizing textual entailment: predicting ordinal human responses on the subjective likelihood of an inference holding in a given context. We describe a framework for extracting common-sense knowledge from corpora, which is then used to construct a dataset for this ordinal entailment task. We train a neural sequence-to-sequence model on this dataset, which we use to score and generate possible inferences. Further, we annotate subsets of previously established datasets via our ordinal annotation protocol in order to then analyze the distinctions between these and what we have constructed.", |
|
"pdf_parse": { |
|
"paper_id": "Q17-1027", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Humans have the capacity to draw commonsense inferences from natural language: various things that are likely but not certain to hold based on established discourse, and are rarely stated explicitly. We propose an evaluation of automated common-sense inference based on an extension of recognizing textual entailment: predicting ordinal human responses on the subjective likelihood of an inference holding in a given context. We describe a framework for extracting common-sense knowledge from corpora, which is then used to construct a dataset for this ordinal entailment task. We train a neural sequence-to-sequence model on this dataset, which we use to score and generate possible inferences. Further, we annotate subsets of previously established datasets via our ordinal annotation protocol in order to then analyze the distinctions between these and what we have constructed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "We use words to talk about the world. Therefore, to understand what words mean, we must have a prior explication of how we view the world. -Hobbs (1987) Researchers in Artificial Intelligence and (Computational) Linguistics have long-cited the requirement of common-sense knowledge in language understanding. 1 This knowledge is viewed as a key", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 152, |
|
"text": "-Hobbs (1987)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1 Schank (1975) : It has been apparent ... within ... natural language understanding ... that the eventual limit to our solution ... would be our ability to characterize world knowledge.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 15, |
|
"text": "Schank (1975)", |
|
"ref_id": "BIBREF64" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Dave found an axe in his garage A car is parked in the garage Tom was accidentally shot by his teammate in the army The teammate dies Two friends were in a heated game of checkers A person shoots the checkers My friends and I decided to go swimming in the ocean The ocean is carbonated component in filling in the gaps between the telegraphic style of natural language statements. We are able to convey considerable information in a relatively sparse channel, presumably owing to a partially shared model at the start of any discourse. 2 Common-sense inference -inferences based on common-sense knowledge -is possibilistic: things everyone more or less would expect to hold in a given context, but without the necessary strength of logical entailment. 3 Because natural language corpora exhibits human reporting bias (Gordon and Van Durme, 2013) , systems that derive knowledge exclusively from such corpora may be more accurately considered models of language, rather than of the world (Rudinger et al., 2015) . Facts such as \"A person walking into a room is very likely to be blinking and breathing\" are usually unstated in text, so their real-world likelihoods do not align to language model probabilities. 4 We would like to have systems capable of reading a sentence that describes a realworld situation and inferring how likely other statements about that situation are to hold true in the real world, e.g. This capability is subtly but crucially distinct from the ability to predict other sentences reported in the same text, as a language model may be trained to do.", |
|
"cite_spans": [ |
|
{ |
|
"start": 536, |
|
"end": 537, |
|
"text": "2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 752, |
|
"end": 753, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 817, |
|
"end": 845, |
|
"text": "(Gordon and Van Durme, 2013)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 987, |
|
"end": 1010, |
|
"text": "(Rudinger et al., 2015)", |
|
"ref_id": "BIBREF61" |
|
}, |
|
{ |
|
"start": 1210, |
|
"end": 1211, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sam bought a new clock The clock runs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We therefore propose a model of knowledge acquisition based on first deriving possibilistic statements from text. As the relative frequency of these statements suffers the mentioned reporting bias, we then follow up with human annotation of derived examples. Since we initially are uncertain about the real-world likelihood of the derived common-sense knowledge holding in any particular context, we pair it with various grounded context and present to humans for their own assessment. As these examples vary in assessed plausibility, we propose the task of ordinal common-sense inference, which embraces a wider set of natural conclusions arising from language comprehension (see Fig 1) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 681, |
|
"end": 687, |
|
"text": "Fig 1)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sam bought a new clock The clock runs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In what follows, we describe prior efforts in common-sense and textual inference ( \u00a72). We then state our position on how ordinal common-sense inference should be defined ( \u00a73), and detail our own framework for large-scale extraction and abstraction, along with a crowdsourcing protocol for assessment ( \u00a74). This includes a novel neural model for forward generation of textual inference statements. Together these methods are applied to contexts derived from various prior textual inference resources, resulting in the JHU Ordinal Common-sense Inference (JOCI) corpus, a large collection of diverse common-sense inference examples, judged to hold with varying levels of subjective likelihood ( \u00a75). We provide baseline results ( \u00a76) for prediction on the JOCI corpus. 5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sam bought a new clock The clock runs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Mining Common Sense Building large collections of common-sense knowledge can be done manually via professionals (Hobbs and Navarretta, 1993) , but at considerable cost in terms of time and expense (Miller, 1995; Lenat, 1995; Baker et al., 1998; Friedland et al., 2004) . Efforts have pursued volunteers (Singh, 2002; Havasi et al., 2007) and games with a purpose (Chklovski, 2003) , but are still left fully reliant on human labor. Many have pursued automating the process, such as in expanding lexical hierarchies (Hearst, 1992; Snow et al., 2006) , constructing inference patterns (Lin and Pantel, 2001; Berant et al., 2011) , reading reference materials (Richardson et al., 1998; Suchanek et al., 2007) , mining search engine query logs (Pa\u015fca and Van Durme, 2007) , and most relevant here: abstracting from instance-level predications discovered in descriptive texts (Schubert, 2002; Liakata and Pulman, 2002; Clark et al., 2003; Banko and Etzioni, 2007) . In this article we are concerned with knowledge mining for purposes of seeding a text generation process (constructing common-sense inference examples).", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 140, |
|
"text": "(Hobbs and Navarretta, 1993)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 211, |
|
"text": "(Miller, 1995;", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 212, |
|
"end": 224, |
|
"text": "Lenat, 1995;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 244, |
|
"text": "Baker et al., 1998;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 245, |
|
"end": 268, |
|
"text": "Friedland et al., 2004)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 316, |
|
"text": "(Singh, 2002;", |
|
"ref_id": "BIBREF66" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 337, |
|
"text": "Havasi et al., 2007)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 380, |
|
"text": "(Chklovski, 2003)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 529, |
|
"text": "(Hearst, 1992;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 530, |
|
"end": 548, |
|
"text": "Snow et al., 2006)", |
|
"ref_id": "BIBREF67" |
|
}, |
|
{ |
|
"start": 583, |
|
"end": 605, |
|
"text": "(Lin and Pantel, 2001;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 626, |
|
"text": "Berant et al., 2011)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 682, |
|
"text": "(Richardson et al., 1998;", |
|
"ref_id": "BIBREF59" |
|
}, |
|
{ |
|
"start": 683, |
|
"end": 705, |
|
"text": "Suchanek et al., 2007)", |
|
"ref_id": "BIBREF68" |
|
}, |
|
{ |
|
"start": 740, |
|
"end": 767, |
|
"text": "(Pa\u015fca and Van Durme, 2007)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 871, |
|
"end": 887, |
|
"text": "(Schubert, 2002;", |
|
"ref_id": "BIBREF65" |
|
}, |
|
{ |
|
"start": 888, |
|
"end": 913, |
|
"text": "Liakata and Pulman, 2002;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 914, |
|
"end": 933, |
|
"text": "Clark et al., 2003;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 934, |
|
"end": 958, |
|
"text": "Banko and Etzioni, 2007)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Common-sense Tasks Many textual inference tasks have been designed to require some degree of common-sense knowledge, e.g., the Winograd Schema Challenge discussed by Levesque et al. (2011) . The data for these tasks are either smaller, carefully constructed evaluation sets by professionals, following efforts like the FRACAS test suite (Cooper et al., 1996) , or they rely on crowdsourced elicitation (Bowman et al., 2015) . Crowdsourcing is scalable, but elicitation protocols can lead to biased responses unlikely to contain a wide range of possible common-sense inferences. Humans can generally agree on the plausibility of a wide range of possible inference pairs, but they are not likely to generate them from an initial prompt. 6 The construction of SICK (Sentences Involving Compositional Knowledge) made use of existing paraphrastic sentence pairs (descriptions by differ-ent people of the same image), which were modified through a series of rule-based transformations then judged by humans (Marelli et al., 2014) . As with SICK, we rely on humans only for judging provided examples, rather than elicitation of text. Unlike SICK, our generation is based on a process targeted specifically at common sense (see \u00a74.1.1).", |
|
"cite_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 188, |
|
"text": "Levesque et al. (2011)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 358, |
|
"text": "(Cooper et al., 1996)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 423, |
|
"text": "(Bowman et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 735, |
|
"end": 736, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1001, |
|
"end": 1023, |
|
"text": "(Marelli et al., 2014)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Plausibility Researchers in psycholinguistics have explored a notion of plausibility in human sentence processing, where, for instance, arguments to predicates are intuitively more or less \"plausible\" as fillers to different thematic roles, as reflected in human reading times. For example, McRae et al. (1998) looked at manipulations such as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 278, |
|
"end": 310, |
|
"text": "For example, McRae et al. (1998)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "(a) The boss hired by the corporation was perfect for the job. (b) The applicant hired by the corporation was perfect for the job. where the plausibility of a boss being the agent -as compared to patient -of the predicate hired might be measured by looking at delays in reading time in the words following the predicate. This measurement is then contrasted with the timing observed in the same positions in (b). 7 Rather than measuring according to predictions such as human reading times, here we ask annotators explicitly to judge plausibility on a 5-point ordinal scale (See \u00a73). Further, our effort might be described in this setting as conditional plausibility, 8 where plausibility judgments for a given sentence are expected to be dependent on preceding context. Further exploration of conditional plausibility is an interesting avenue of potential future work, perhaps through the measurement of human reading times when using prompts derived from our ordinal common-sense inference examples. Computational modeling of (unconditional) semantic plausibility has been explored by those such as Pad\u00f3 et al. (2009) , Erk et al. (2010) and Sayeed et al. (2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 412, |
|
"end": 413, |
|
"text": "7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1100, |
|
"end": 1118, |
|
"text": "Pad\u00f3 et al. (2009)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 1121, |
|
"end": 1138, |
|
"text": "Erk et al. (2010)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1143, |
|
"end": 1163, |
|
"text": "Sayeed et al. (2015)", |
|
"ref_id": "BIBREF63" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Textual Entailment A multi-year source of textual inference examples were generated under the Recognizing Textual Entailment (RTE) Challenges, introduced by Dagan et al. (2006) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 176, |
|
"text": "(2006)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We say that T entails H if, typically, a human reading T would infer that H is most likely true. This somewhat informal definition is based on (and assumes) common human understanding of language as well as common background knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "This definition strayed from the more strict notion of entailment as used by linguistic semanticists, such as those involved with FRACAS. While Giampiccolo et al. (2008) extended binary RTE with an \"unknown\" category, the entailment community has primarily focused on issues such as \"paraphrase\" and \"monotonicity\". An example of this is the Natural Logic implementation of MacCartney and Manning (2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 169, |
|
"text": "Giampiccolo et al. (2008)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 403, |
|
"text": "MacCartney and Manning (2007)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Language understanding in context is not only understanding the entailments of a sentence, but also the plausible inferences of the sentence, i.e. the new posterior on the world after reading the sentence. A new sentence in a discourse is almost never entailed by another sentence in the discourse, because such a sentence would add no new information. In order to successfully process a discourse, there needs to be some understanding of what new information can be, possibly or plausibly, added to the discourse. Collecting sentence pairs with ordinal entailment connections is potentially useful for improving and testing these language understanding capabilities that would be needed by algorithms for applications like storytelling. Garrette et al. (2011) and Beltagy et al. (2017) treated textual entailment as probabilistic logical inference in Markov Logic Networks (Richardson and Domingos, 2006) . However, the notion of probability in their entailment task has a subtle distinction from our problem of common-sense inference. The probability of being an entailment given by a probabilistic model trained for a binary classification (being an entailment or not) is not necessarily the same as the likelihood of an inference being true. No human reading T should infer that H is true. A model trained to make ordinal predictions should say: \"plausible, with probability 1.0\", whereas a model trained to make binary entailed/not-entailed predictions should say: \"not entailed, with probability 1.0\". The following example exhibits the same property:", |
|
"cite_spans": [ |
|
{ |
|
"start": 738, |
|
"end": 760, |
|
"text": "Garrette et al. (2011)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 765, |
|
"end": 786, |
|
"text": "Beltagy et al. (2017)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 874, |
|
"end": 905, |
|
"text": "(Richardson and Domingos, 2006)", |
|
"ref_id": "BIBREF58" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "T: An animal eats food. H: A person eats food. Again, with high confidence, H is plausible; and, with high confidence, it is also not entailed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Non-entailing Inference Of the various non-\"entailment\" textual inference tasks, a few are most salient here. Agirre et al. (2012) piloted a Textual Similarity evaluation which has been refined in subsequent years. Systems produce scalar values corresponding to predictions of how similar the meaning is between two provided sentences, e.g., the following pair from SICK was judged very similar (4.2 out of 5), while also being a contradiction: There is no biker jumping in the air and A lone biker is jumping in the air. The ordinal approach we advocate for relies on a graded notion, like textual similarity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 130, |
|
"text": "Agirre et al. (2012)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The Choice of Plausible Alternative (COPA) task (Roemmele et al., 2011) was a reaction to RTE, similarly motivated to probe a system's ability to understand inferences that are not strictly entailed. A single context was provided, with two alternative inferences, and a system had to judge which was more plausible. The COPA dataset was manually elicited, and is not large; we discuss this data further in \u00a75.", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 71, |
|
"text": "(Roemmele et al., 2011)", |
|
"ref_id": "BIBREF60" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The Narrative Cloze task (Chambers and Jurafsky, 2008) requires a system to score candidate inferences as to how likely they are to appear in a document that also included the provided context. Many such inferences are then not strictly entailed by the context. Further, the Cloze task gives the benefit of being able to generate very large numbers of examples automatically by simply occluding parts of existing documents and asking a system to predict what is missing. The LAMBADA dataset (Paperno et al., 2016) is akin to our strategy for automatic generation followed by human filtering, but for Cloze examples. As our concern is with inferences that are often true but never stated in a document, this approach is not viable here. The ROC-Stories corpus (Mostafazadeh et al., 2016 ) elicited a more \"plausible\" collection of documents in order to retain the narrative Cloze in the context of common-sense inference. The ROCStories corpus can be viewed as an extension of the idea behind the COPA corpus, done at a larger scale with crowdsourcing, and with multi-sentence contexts; we consider this dataset in \u00a75.", |
|
"cite_spans": [ |
|
{ |
|
"start": 759, |
|
"end": 785, |
|
"text": "(Mostafazadeh et al., 2016", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Alongside the narrative Cloze, Pichotta and Mooney (2016) made use of a 5-point Likert scale (very likely to very unlikely) as a secondary evaluation of various script induction techniques. While they were concerned with measuring their ability to generate very likely inferences, here we are interested in generating a wide swath of inference candidates, including those that are impossible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our goal is a system that can perform speculative, common-sense inference as part of understanding language. Based on the observed shortfalls of prior work, we propose the notion of Ordinal Commonsense Inference (OCI). OCI embraces the notion of Dagan et al. (2006) , in that we are concerned with human judgments of epistemic modality. 9", |
|
"cite_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 265, |
|
"text": "Dagan et al. (2006)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ordinal Common-sense Inference", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "As agreed by many linguists, modality in natural language is a continuous category, but speakers are able to map areas of this axis into discrete values (Lyons, 1977; Horn, 1989 ; de Haan, 1997) - Saur\u00ed and Pustejovsky (2009) According to Horn (1989) , there are two scales of epistemic modality which differ in polarity (positive vs. negative polarity): certain, likely, possible and impossible, unlikely, uncertain . The Square of Opposition (SO) (Fig 2) illustrates the logical relations holding between values in the two scales. Based on their logical relations, we can make a set of exhaustive epistemic modals: very likely, likely, possible, impossible , where very likely, likely, possible lie on a single, positive Horn scale, and impossible, a complementary concept from the corresponding negative Horn scale, completes the set. In this paper, we further replace the value possible by the more fine-grained values (technically possible and plausible). This results in a 5-point scale of likelihood: very likely, likely, plausible, technically possible, impossible . The OCI task definition directly embraces subjective likelihood on such an ordinal scale. Humans are presented with a context C and asked whether a provided hypothesis H is very likely, likely, plausible, technically possible, or impossible. Furthermore, an important part of this process is the generation of H by automatic methods, which seeks to avoid the elicitation bias of many prior works. 4 Framework for collecting OCI corpus", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 166, |
|
"text": "(Lyons, 1977;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 177, |
|
"text": "Horn, 1989", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 225, |
|
"text": "Saur\u00ed and Pustejovsky (2009)", |
|
"ref_id": "BIBREF62" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 250, |
|
"text": "Horn (1989)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 449, |
|
"end": 456, |
|
"text": "(Fig 2)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ordinal Common-sense Inference", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We now describe our framework for collecting ordinal common-sense inference examples. It is natural to collect this data in two stages. In the first stage ( \u00a74.1), we automatically generate inference candidates given some context. We propose two broad approaches using either general world knowledge or neural methods. In the second stage ( \u00a74.2), we annotate these candidates with ordinal labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ordinal Common-sense Inference", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our motivation for this approach was first introduced by Schubert (2002) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 72, |
|
"text": "Schubert (2002)", |
|
"ref_id": "BIBREF65" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation based on World Knowledge", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "There is a largely untapped source of general knowledge in texts, lying at a level beneath the explicit assertional content. This knowledge consists of relationships implied to be possible in the world, or, under certain conditions, implied to be normal or commonplace in the world.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation based on World Knowledge", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "Following Schubert (2002) and Van Durme and Schubert (2008), we define an approach for abstracting over explicit assertions derived from corpora, leading to a large-scale collection of general possibilistic statements. As shown in Fig 3, (a) Extracting propositions: First we extract a large set of propositions with predicate-argument structures from noun phrases and clauses, under which general world presumptions often lie. To achieve this goal, we use PredPatt 11 (White et al., 2016; Zhang et al., 2017) , which defines a frame-work of interpretable, language-neutral predicateargument extraction patterns from Universal Dependencies (de Marneffe et al., 2014) . Fig 3 (a) shows an example extraction. We use the Gigaword corpus (Parker et al., 2011) for extracting propositions as it is a comprehensive text archive. There exists a version containing automatically generated syntactic annotation (Ferraro et al., 2014) , which bootstraps large-scale knowledge extraction. We use PyStanfordDependencies 12 to convert constituency parses to depedency parses, from which we extract structured propositions. (b) Abstracting propositions: In this step, we abstract the propositions into a more general form. This involves lemmatization, stripping inessential modifiers and conjuncts, and replacing specific arguments with generic types. 13 This method of abstraction often yields general presumptions about the world. To reduce noise from predicate-argument extraction, we only keep 1-place and 2-place predicates after abstraction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 25, |
|
"text": "Schubert (2002)", |
|
"ref_id": "BIBREF65" |
|
}, |
|
{ |
|
"start": 469, |
|
"end": 489, |
|
"text": "(White et al., 2016;", |
|
"ref_id": "BIBREF75" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 509, |
|
"text": "Zhang et al., 2017)", |
|
"ref_id": "BIBREF78" |
|
}, |
|
{ |
|
"start": 640, |
|
"end": 666, |
|
"text": "(de Marneffe et al., 2014)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 735, |
|
"end": 756, |
|
"text": "(Parker et al., 2011)", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 903, |
|
"end": 925, |
|
"text": "(Ferraro et al., 2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1339, |
|
"end": 1341, |
|
"text": "13", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 237, |
|
"text": "Fig 3,", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 669, |
|
"end": 678, |
|
"text": "Fig 3 (a)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Generation based on World Knowledge", |
|
"sec_num": "4.1.1" |
|
}, |
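The extraction step (a) can be sketched roughly as follows. This is a hypothetical usage of the predpatt package: load_conllu, PredPatt, and the instances/arguments attributes are assumptions about its API, not code from the paper.

```python
# Rough sketch only: assumes the `predpatt` package; attribute names may differ by version.
from predpatt import PredPatt, load_conllu

def extract_propositions(conllu_path):
    """Yield (predicate, argument-phrases) pairs from Universal Dependencies parses."""
    for sent_id, ud_parse in load_conllu(conllu_path):
        ppatt = PredPatt(ud_parse)
        for pred in ppatt.instances:
            args = [" ".join(tok.text for tok in arg.tokens) for arg in pred.arguments]
            yield pred.root.text, args

# Hypothetical file name; parsed Gigaword would be supplied as CoNLL-U here.
# for predicate, args in extract_propositions("gigaword_sample.conllu"):
#     print(predicate, args)
```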
|
{ |
|
"text": "We further generalize individual arguments to concepts by attaching semantic-class labels to them. Here we choose WordNet (Miller, 1995) noun synsets 14 as the semantic-class set. When selecting the correct sense for an argument, we adopt a fast and relatively accurate method: always taking the first sense which is usually the most commonly used sense (Suchanek et al., 2007; Pasca, 2008) . By doing so, we attach 84 million abstracted propositions with senses, covering 43.7% (35,811/81,861) of WordNet noun senses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 136, |
|
"text": "(Miller, 1995)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 377, |
|
"text": "(Suchanek et al., 2007;", |
|
"ref_id": "BIBREF68" |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 390, |
|
"text": "Pasca, 2008)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 494, |
|
"text": "(35,811/81,861)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation based on World Knowledge", |
|
"sec_num": "4.1.1" |
|
}, |
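A minimal sketch of the first-sense heuristic and the depth-4 cut point mentioned in footnote 14, using NLTK's WordNet interface; the function names are illustrative, not the authors' code.

```python
# Sketch: attach a noun sense to an argument with the first-sense heuristic (NLTK WordNet).
from nltk.corpus import wordnet as wn

CUT_DEPTH = 4  # footnote 14: only senses below the depth-4 cut points are considered

def first_noun_sense(argument_head):
    """Return the most commonly used (first-listed) noun synset, or None."""
    synsets = wn.synsets(argument_head, pos=wn.NOUN)
    return synsets[0] if synsets else None

def usable_sense(synset):
    """Approximation of the cut: keep senses at or below the depth-4 cut points."""
    return synset is not None and synset.min_depth() >= CUT_DEPTH

sense = first_noun_sense("magazine")
print(sense, usable_sense(sense))  # e.g. Synset('magazine.n.01') True
```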
|
{ |
|
"text": "Each of these WordNet senses, then, is associated with a set of abstracted propositions. The abstracted propositions are turned into templates by replacing the sense's corresponding argument with a placeholder, similar to Van Durme et al. 2009(see Fig 3 (b) ). We remove any template associated with a sense if it occurs less than two times for that sense, 12 https://pypi.python.org/pypi/ PyStanfordDependencies 13 Using English glosses of the logical representations, abstraction of \"a long, dark corridor\" would yield \"corridor\" for example; \"a small office at the end of a long dark corridor\" would yield \"office\"; and \"Mrs. MacReady\" would yield \"person\". See Schubert (2002) for detail.", |
|
"cite_spans": [ |
|
{ |
|
"start": 665, |
|
"end": 680, |
|
"text": "Schubert (2002)", |
|
"ref_id": "BIBREF65" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 257, |
|
"text": "Fig 3 (b)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Generation based on World Knowledge", |
|
"sec_num": "4.1.1" |
|
}, |
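A small sketch of the template step illustrated in Fig 3(b): each abstracted proposition becomes a template by replacing the sense's argument with a placeholder, and templates seen fewer than two times for a sense are discarded. The data structures here are illustrative.

```python
# Sketch: build per-sense templates and drop those occurring fewer than twice for that sense.
from collections import Counter, defaultdict

def make_template(proposition_tokens, argument):
    """('person', 'subscribe', 'to', 'magazine'), 'magazine' -> 'person subscribe to ___'"""
    return " ".join("___" if tok == argument else tok for tok in proposition_tokens)

def build_templates(abstracted_props, min_count=2):
    """abstracted_props: iterable of (sense, proposition_tokens, argument) triples."""
    counts = defaultdict(Counter)
    for sense, tokens, arg in abstracted_props:
        counts[sense][make_template(tokens, arg)] += 1
    return {sense: {t for t, c in counter.items() if c >= min_count}
            for sense, counter in counts.items()}

props = [("magazine.n.01", ("person", "subscribe", "to", "magazine"), "magazine")] * 2
print(build_templates(props))  # {'magazine.n.01': {'person subscribe to ___'}}
```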
|
{ |
|
"text": "14 In order to avoid too general senses, we set cut points at the depth of 4 (Pantel et al., 2007) to truncate the hierarchy and consider all 81,861 senses below these points.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 98, |
|
"text": "(Pantel et al., 2007)", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation based on World Knowledge", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "leaving 38 million unique templates. (c) Deriving properties via WordNet: At this step, we want to associate with each WordNet sense a set of possible properties. We employ three strategies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation based on World Knowledge", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "The first strategy is to use a decision tree to pick out highly discriminative properties for each WordNet sense. Specifically, for each set of cohyponyms, 15 we train a decision tree using the associated templates as features. For example, in Fig 3 (c) , we train a decision tree over the cohyponyms of publication.n.01. Then the template \"person subscribe to \" would be selected as a property of magazine.n.01, and the template \"person borrow from library\" for book.n.01. The second strategy selects the most frequent templates associated with each sense as properties of that sense. The third strategy uses WordNet ISA relations to derive new properties of senses. For the sense book.n.01 and its hypernym publication.n.01, we generate a property \" be publication\". (d) Generating hypotheses: As shown in Fig 3 (d) , given a discourse context (Tanenhaus and Seidenberg, 1980), we first extract an argument of the context, then select the derived properties for the argument. Since we don't assume any specific sense for the argument, these properties could come from any of its candidate senses. We generate hypotheses by replacing the placeholder in the selected properties with the argument, and verbalizing the properties. 16", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 158, |
|
"text": "15", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 253, |
|
"text": "Fig 3 (c)", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 808, |
|
"end": 817, |
|
"text": "Fig 3 (d)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Generation based on World Knowledge", |
|
"sec_num": "4.1.1" |
|
}, |
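A toy sketch of the first (decision tree) strategy, assuming scikit-learn: each cohyponym sense contributes a feature row indicating which templates it occurs with, and high-importance features are read off as discriminative properties. The real setup would use instance-level rows rather than one row per sense.

```python
# Toy sketch: discriminative templates for cohyponyms via a decision tree (scikit-learn assumed).
import numpy as np
from sklearn.tree import DecisionTreeClassifier

def discriminative_templates(sense_to_templates, top_k=5):
    senses = sorted(sense_to_templates)
    templates = sorted({t for ts in sense_to_templates.values() for t in ts})
    # One row per sense; each column marks whether that template occurred with the sense.
    X = np.array([[t in sense_to_templates[s] for t in templates] for s in senses], dtype=float)
    y = np.arange(len(senses))
    tree = DecisionTreeClassifier(random_state=0).fit(X, y)
    ranked = np.argsort(tree.feature_importances_)[::-1]
    return [templates[i] for i in ranked[:top_k] if tree.feature_importances_[i] > 0]

cohyponyms = {"magazine.n.01": {"person subscribe to ___", "___ be publication"},
              "book.n.01": {"person borrow ___ from library", "___ be publication"}}
print(discriminative_templates(cohyponyms))
```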
|
{ |
|
"text": "In addition to the knowledge-based methods described above, we also adapt a neural sequence-tosequence model (Vinyals et al., 2015; Bahdanau et al., 2014) to generate inference candidates given contexts. The model is trained on sentence pairs labeled \"entailment\" from the SNLI corpus (Bowman et al., 2015) (train). Here, the SNLI \"premise\" is the input (context C), and the SNLI \"hypothesis\" is the output (hypothesis H).", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 131, |
|
"text": "(Vinyals et al., 2015;", |
|
"ref_id": "BIBREF74" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 154, |
|
"text": "Bahdanau et al., 2014)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "We employ two different strategies for forward generation of inference candidates given any con-text. The sentence-prompt strategy uses the entire sentence in the context as an input, and generates output using greedy decoding. The word-prompt strategy differs by using only a single word from the context as input. This word is chosen in the same fashion as the step (d) in the generation based on world knowledge, i.e. an argument of the context. The second approach is motivated by our hypothesis that providing only a single word context will force the model to generate a hypothesis that generalizes over the many contexts in which that word was seen, resulting in more common-sense-like hypotheses, as in Fig 4. We later present the full context and decoded hypotheses to crowdsourced annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 711, |
|
"end": 717, |
|
"text": "Fig 4.", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "dustpan a person is cleaning. a boy in blue and white shorts is sweeping with a broom and dustpan.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "a young man is holding a broom. Neural Sequence-to-Sequence Model", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "Neural sequence-to-sequence models learn to map variable-length input sequences to variable-length output sequences, as a conditional probability of output given input. For our purposes, we want to learn the conditional probability of an hypothesis sentence, H, given a context sentence, C, i.e., P (H|C).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "The sequence-to-sequence architecture consists of two components: an encoder and a decoder. The encoder is a recurrent neural network (RNN) iterating over input tokens (i.e., words in C), and the decoder is another RNN iterating over output tokens (words in H). The final state of the encoder, h C , is passed to the decoder as its initial state. We use a three-layer stacked LSTM (state size 512) for both the encoder and decoder RNN cells, with independent parameters for each. We use the LSTM formulation of Hochreiter and Schmidhuber (1997) as summarized in Vinyals et al. (2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 511, |
|
"end": 544, |
|
"text": "Hochreiter and Schmidhuber (1997)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 583, |
|
"text": "Vinyals et al. (2015)", |
|
"ref_id": "BIBREF74" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "The network computes P (H|C):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (H|C) = len(H) t=1 p(w t |w <t , C)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "where w t are the words in H. At each time step, t, the successive conditional probability is computed from the LSTM's current hidden state:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(w t |w <t , C) \u221d exp(v wt \u2022 h t )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "where v wt is the embedding of word w t from its corresponding row in the output vocabulary matrix, V (a learnable parameter of the network), and h t is the hidden state of the decoder RNN at time t. In our implementation, we set the vocabulary to be all words that appear in the training data at least twice, resulting in a vocabulary of size 24,322. This model also makes use of an attention mechanism. 17 An attention vector, attn t , is concatenated with the LSTM hidden state at time t to form the hidden state, h t , from which output probabilities are computed (Eqn. 2). This attention vector is a weighted average of the hidden states of the encoder, h 1\u2264i\u2264len(C) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 405, |
|
"end": 407, |
|
"text": "17", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "u t i = v T tanh(W 1 h i + W 2 h t ) a t i = softmax(u t i ) attn t = len(C) i=1 a t i h i", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
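The attention computation of Eqn. (3) and the output distribution of Eqn. (2) can be written directly in numpy; the dimensions and random parameters below are illustrative only.

```python
# Sketch: Eqn. (3) attention weights/context vector and the Eqn. (2) output softmax, in numpy.
import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

def attention_step(enc_states, dec_state, v, W1, W2):
    """enc_states: [len(C), d]; dec_state: [d] -> (attn_t, a^t)."""
    scores = np.array([v @ np.tanh(W1 @ h_i + W2 @ dec_state) for h_i in enc_states])  # u^t_i
    weights = softmax(scores)                  # a^t_i
    return weights @ enc_states, weights       # attn_t = sum_i a^t_i h_i

def output_distribution(h_t, V):
    """p(w_t | w_<t, C) proportional to exp(v_{w_t} . h_t); V is the output vocabulary matrix."""
    return softmax(V @ h_t)

d, vocab = 4, 6
rng = np.random.default_rng(0)
enc, dec = rng.normal(size=(3, d)), rng.normal(size=d)
attn, a = attention_step(enc, dec, rng.normal(size=d), rng.normal(size=(d, d)), rng.normal(size=(d, d)))
probs = output_distribution(np.concatenate([dec, attn]), rng.normal(size=(vocab, 2 * d)))
print(a.round(3), probs.sum().round(3))  # attention weights sum to 1; probs sum to 1
```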
|
{ |
|
"text": "where vector v and matrices W 1 , W 2 are parameters. The network is trained via backpropagation on the cross-entropy loss of the observed sequences in training. A sampled softmax is used to compute the loss during training, while a full softmax is used after training to score unseen (C, H) pairs, or to generate an H given a C. Generation is performed via beam search with a beam size of 1; the highest probability word is decoded at each time step and fed as input to the decoder at the next time step until an end-of-sequence token is decoded.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generation via Neural Methods", |
|
"sec_num": "4.1.2" |
|
}, |
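The generation procedure described above (beam size 1) amounts to the following greedy loop; the step function standing in for the LSTM decoder is hypothetical.

```python
# Sketch: greedy (beam size 1) decoding; `step` is a hypothetical decoder interface.
def greedy_decode(step, init_state, bos_id, eos_id, max_len=50):
    """step(prev_token_id, state) -> (probs_over_vocab, new_state)."""
    tokens, state, prev = [], init_state, bos_id
    for _ in range(max_len):
        probs, state = step(prev, state)
        prev = max(range(len(probs)), key=probs.__getitem__)  # highest-probability word id
        if prev == eos_id:
            break
        tokens.append(prev)
    return tokens

# Toy usage with a dummy two-word vocabulary {0: <eos>, 1: 'word'}.
dummy = lambda prev, s: (([0.9, 0.1] if s <= 0 else [0.2, 0.8]), s - 1)
print(greedy_decode(dummy, init_state=2, bos_id=1, eos_id=0))  # [1, 1]
```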
|
{ |
|
"text": "In this stage, we turn to human efforts to annotate common-sense inference candidates with ordinal labels. The annotator is given a context, and then is asked to assess the likelihood of the hypotheses being true. These context-hypothesis pairs are annotated with one of the five labels: very likely, likely, plausible, technically possible, and impossible, corresponding to the ordinal values of {5,4,3,2,1} respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ordinal Label Annotation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In the case that the hypotheses in the inference candidates do not make sense, or have grammatical errors, judges can provide an additional label, NA, so that we can filter these candidates in post-processing. The combination of generation of common-sense inference candidates with human filtering seeks to avoid the problem of elicitation bias.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ordinal Label Annotation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We now describe indepth how we created the JHU Ordinal Common-sense Inference (JOCI) corpus. The main part of the corpus consists of contexts chosen from SNLI (Bowman et al., 2015) and ROCStories (Mostafazadeh et al., 2016) , paired with hypotheses generated via methods described in \u00a74.1. These pairs are then annotated with ordinal labels using crowdsourcing ( \u00a74.2). We also include context-hypothesis pairs directly taken from SNLI and other corpora (e.g., as premise-hypothesis pairs), and re-annotate them with ordinal labels.", |
|
"cite_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 223, |
|
"text": "(Mostafazadeh et al., 2016)", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "JOCI Corpus", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In order to compare with existing inference corpora, we choose contexts from two resources: (1) the first sentence in the sentence pairs of the SNLI corpus which are captions from the Flickr30k corpus (Young et al., 2014) , and (2) the first sentence in the stories of the ROCStories corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 221, |
|
"text": "(Young et al., 2014)", |
|
"ref_id": "BIBREF76" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data sources for Context-Hypothesis Pairs", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We then collect candidates of automatically generated common-sense inferences (AGCI) against these contexts. Specifically, in the SNLI train set, there are over 150K different first sentences, involving 7,414 different arguments according to predicate-argument extraction. We randomly choose 4,600 arguments. For each argument, we sample one first sentence that has the argument and collect candidates of AGCI against this as context. We also do the same generation for the SNLI development set and test set. We also collect candidates of AGCI against randomly sampled first sentences in the ROCStories corpus. Collectively, these pairs and their ordinal labels (to be described in \u00a7 5.2) make up the main part of the JOCI corpus. The statistics of this subset are shown in Table 1 (first five rows).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 774, |
|
"end": 781, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data sources for Context-Hypothesis Pairs", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For comprehensiveness, we also produced ordinal labels on (C, H) pairs directly drawn from existing corpora. For SNLI, we randomly select 1000 contexts (premises) from the SNLI train set. Then, the corresponding hypothesis is one of the entailment, neutral, or contradiction hypotheses taken from SNLI. For ROCStories, we defined C as the first sentence of the story, and H as the second or third sentence. For COPA, (C, H) corresponds to premise-effect. The statistics are shown in the bottom rows of ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data sources for Context-Hypothesis Pairs", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We use Amazon Mechanical Turk to annotate the hypotheses with ordinal labels. In each HIT (Human Intelligence Task), a worker is presented with one context and one or two hypotheses, as shown in Fig 5. First, the annotator sees an \"Initial Sentence\" (context), e.g. \"John's goal was to learn how to draw well.\", and is then asked about the plausibility of the hypothesis, e.g. \"A person accomplishes the goal\". In particular, we ask the annotator how plausible the hypothesis is true during or shortly after, because without this constraint, most sentences are technically plausible in some imaginary world. If the hypothesis does not make sense 18 , the workers can check the box under the question and skip the ordinal annotation. In the annotation, about 25% of hypotheses are marked as not making sense, and are removed from our data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 201, |
|
"text": "Fig 5.", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Ordinal Label Annotation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "With the sampled contexts and the auto-generated Initial Sentence: John 's goal was to learn how to draw well 1. The following statements is to be true during or shortly after the context of the initial sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Ordinal Label Annotation", |
|
"sec_num": "5.2" |
|
}, |
|
|
{ |
|
"text": "This statement does not make sense. hypotheses, we prepare 50K common-sense inference examples for crowdsourced annotation in bulk. In order to guarantee the quality of annotation, we have each example annotated by three workers. We take the median of the three as the gold label. Table 3 shows the statistics of the crowdsourced efforts. To make sure non-expert workers have a correct understanding of our task, before launching the later tasks in bulk, we run two pilots to create a pool of qualified workers. In the first pilot, we publish 100 examples. Each example is annotated by five work-ers. From this pilot, we collect a set of \"good\" examples which have a 100% annotation agreement among workers. The ordinal labels chosen by the workers are regarded as the gold labels. In the second pilot, we randomly select two \"good\" (highagreement) examples for each ordinal label and publish an HIT with these examples. To measure the workers' agreement, we calculate the average of quadratic weighted Cohen's \u03ba scores between the workers' annotation. By setting a threshold of the average of \u03ba scores to 0.7, we are able to create a pool that has over 150 qualified workers.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 281, |
|
"end": 288, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The goal is a content .", |
|
"sec_num": null |
|
}, |
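A sketch of the aggregation and agreement computations described above: the median of the three ordinal judgments as the gold label, and quadratic-weighted Cohen's kappa averaged over annotator pairs (scikit-learn's cohen_kappa_score is assumed).

```python
# Sketch: median-of-three gold labels and mean quadratic-weighted Cohen's kappa.
from itertools import combinations
from statistics import median
from sklearn.metrics import cohen_kappa_score

def gold_labels(annotations):
    """annotations: list of [label_w1, label_w2, label_w3] per example (ordinals 1-5)."""
    return [int(median(triple)) for triple in annotations]

def mean_pairwise_kappa(annotations):
    columns = list(zip(*annotations))  # one label sequence per annotator
    scores = [cohen_kappa_score(a, b, weights="quadratic")
              for a, b in combinations(columns, 2)]
    return sum(scores) / len(scores)

anns = [[5, 4, 5], [2, 1, 2], [3, 3, 4], [1, 1, 2]]
print(gold_labels(anns), round(mean_pairwise_kappa(anns), 2))
```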
|
{ |
|
"text": "We want a corpus with a reliable inter-annotator agreement. Additionally, in order to evaluate or train a common-sense inference system, we ideally need a corpus that provides as many inference examples as possible for every ordinal likelihood value. In this section, we investigate the characteristics of the JOCI corpus. We also compare JOCI with related resources under our annotation protocol. Quality: We measure the quality of each pair by calculating Cohen's \u03ba of workers' annotations. The average \u03ba of the JOCI corpus is 0.54. Fig 7 shows the growth of the size of JOCI as we decrease the threshold of the averaged \u03ba to filter pairs. Even if we place a relatively strict threshold (>0.6), we still get a large subset of JOCI with over 20K pairs. Label Distribution: We believe datasets with wide support of label distribution are important in training and evaluating systems to recognize ordinal scale inferences. Fig 6a shows the normalized label distribution of JOCI vs. SNLI. As desired, JOCI covers a wide range of ordinal likelihoods, with many samples in each ordinal scale. Note also how traditional RTE labels are related to ordinal labels, although many inferences in SNLI require no common-sense knowledge (e.g. paraphrases). As expected, entailments are mostly considered very likely; neutral inferences mostly plausible; and contradictions likely to be either impossible or technically possible. Fig 6b shows the normalized distributions of JOCI and ROCStories. Compared with ROCStories, JOCI still covers a wider range of ordinal likelihood. In ROCStories we observe that, while 2nd sentences are in general more likely to be true than 3rd, a large proportion of both 2nd and 3rd sentences are plausible, as compared to likely or very likely. This matches intuition: pragmatics dictates that subsequent sentences in a standard narrative carry new in-formation. 19 That our protocol picks this up is an encouraging sign for our ordinal protocol, as well as suggestive that the makeup of the elicited ROCStories collection is indeed \"story like.\"", |
|
"cite_spans": [ |
|
{ |
|
"start": 1882, |
|
"end": 1884, |
|
"text": "19", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 535, |
|
"end": 546, |
|
"text": "Fig 7 shows", |
|
"ref_id": "FIGREF8" |
|
}, |
|
{ |
|
"start": 922, |
|
"end": 934, |
|
"text": "Fig 6a shows", |
|
"ref_id": "FIGREF7" |
|
}, |
|
{ |
|
"start": 1416, |
|
"end": 1428, |
|
"text": "Fig 6b shows", |
|
"ref_id": "FIGREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpus Characteristics", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "For the COPA dataset, we only make use of the pairs in which the alternatives are plausible effects (rather than causes) of the premise, as our protocol more easily accommodates these pairs. 20 Annotating this section of COPA with ordinal labels provides an enlightening and validating view of the dataset. Fig 6c shows the normalized distribution of COPA next to that of JOCI (COPA-1 alternatives are marked as most plausible; COPA-0 are not.), True to its name, the majority of COPA alternatives are labeled as either plausible or likely; almost none are impossible. This is consistent with the idea that the COPA task is to determine which of two possible options is the more plausible. Fig 8 shows the joint distribution of ordinal labels on (COPA-0,COPA-1) pairs. As expected, the densest areas of the heatmap lie above the diagonal, indicating that in almost every pair, COPA-1 received a higher likelihood judgement than COPA-0. Automatic Generation Comparisons: We compare the label distributions of different methods for automatic generation of common-sense inference (AGCI) in Fig 9. Among ACGI-WK (generation based on world knowledge) methods, the ISA strategy yields a bimodal distribution, with the majority of inferences labeled impossible or very likely. This is likely because most copular statements generated with the ISA strategy will either be categorically true or false. In contrast, the decision tree and frequency based strategies generate many more hypotheses with intermediate ordinal labels. This suggests the propositional templates (learned from text) capture many \"possibilistic\" hypotheses, which is our aim. The two AGCI-NN (generation via neural methods) strategies show interesting differences in label distribution as well. Sequence-to-sequence decodings with full-sentence prompts lead to more very likely labels than single-word prompts. The reason may be that the model behaves more similarly to SNLI entailments when it has access to all the information in the context. When combined, the five AGCI strategies (three AGCI-WK and two AGCI-NN) provide reasonable coverage over all five categories, as can be seen in Fig 6. 6 Predicting Ordinal Judgments", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 307, |
|
"end": 319, |
|
"text": "Fig 6c shows", |
|
"ref_id": "FIGREF7" |
|
}, |
|
{ |
|
"start": 690, |
|
"end": 701, |
|
"text": "Fig 8 shows", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1087, |
|
"end": 1093, |
|
"text": "Fig 9.", |
|
"ref_id": "FIGREF10" |
|
}, |
|
{ |
|
"start": 2153, |
|
"end": 2159, |
|
"text": "Fig 6.", |
|
"ref_id": "FIGREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpus Characteristics", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We want to be able to predict ordinal judgments of the kind presented in this corpus. Our goal in this section is to establish baseline results and explore what kinds of features are useful for predicting ordinal common-sense inference. To do so, we train and test a logistic ordinal regression model g \u03b8 (\u03c6(C, H)), which outputs ordinal labels using features \u03c6 defined on context-inference pairs. Here, g \u03b8 (\u2022) is a regression model with \u03b8 as trained parameters; we train using the margin-based method of (Rennie and Srebro, 2005), implemented in (Pedregosa-Izquierdo, 2015), 21 with the following features: Bag of words features (BOW): We compute (1) \"BOW overlap\" (size of word overlap in C and H), and (2) BOW overlap divided by the length of H.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus Characteristics", |
|
"sec_num": "5.3" |
|
}, |
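A sketch of the BOW features and the ordinal regression model; mord (Pedregosa's ordinal regression package) is assumed as a stand-in for the margin-based method cited above, and the Figure 1 pairs are reused as toy training data.

```python
# Sketch: BOW overlap features + threshold-based ordinal logistic regression (mord assumed).
import numpy as np
import mord

def bow_features(context, hypothesis):
    c, h = set(context.lower().split()), set(hypothesis.lower().split())
    overlap = len(c & h)
    return [overlap, overlap / max(len(h), 1)]

pairs = [("sam bought a new clock", "the clock runs"),
         ("dave found an axe in his garage", "a car is parked in the garage"),
         ("tom was shot by his teammate", "the teammate dies"),
         ("two friends played checkers", "a person shoots the checkers"),
         ("we went swimming in the ocean", "the ocean is carbonated")]
labels = np.array([5, 4, 3, 2, 1])  # ordinal scale: very likely ... impossible

X = np.array([bow_features(c, h) for c, h in pairs])
model = mord.LogisticAT().fit(X, labels)   # all-threshold ordinal regression
print(model.predict(X))
```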
|
{ |
|
"text": "Using Google's word2vec vectors trained on 100 billion tokens of GoogleNews, 22 we (1) sum the vectors in both the context and hypothesis and compute the cosinesimilarity of the resulting two vectors (\"similarity of average\"), and (2) compute the cosine-similarity of all word pairs across the context and inference, then average those similarities (\"average of similarity\"). Seq2seq score features (S2S): We compute the log probability log P (H|C) under the sequence-tosequence model described in \u00a7 4.1.2. There are five variants: (1) Seq2seq trained on SNLI \"entailment\" pairs only, (2) \"neutral\" pairs only, (3) \"contradiction\" pairs only, (4) \"neutral\" and \"contradiction\" pairs, and (5) SNLI pairs (any label) with the context (premise) replaced by an empty string. Seq2seq binary features (S2S-BIN): Binary indicator features for each of the five seq2seq model variants, indicating that model achieved the lowest score on the context-hypothesis pair.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Similarity features (SIM):", |
|
"sec_num": null |
|
}, |
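A sketch of the two similarity features ("similarity of average" and "average of similarity"), assuming gensim's KeyedVectors and the GoogleNews word2vec binary; the path is illustrative.

```python
# Sketch: word2vec similarity features (gensim assumed; model path is illustrative).
import numpy as np
from gensim.models import KeyedVectors

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

def sim_features(context, hypothesis, kv):
    c = [w for w in context.lower().split() if w in kv]
    h = [w for w in hypothesis.lower().split() if w in kv]
    if not c or not h:
        return [0.0, 0.0]
    sim_of_avg = cosine(np.sum([kv[w] for w in c], axis=0),
                        np.sum([kv[w] for w in h], axis=0))
    avg_of_sim = float(np.mean([cosine(kv[a], kv[b]) for a in c for b in h]))
    return [sim_of_avg, avg_of_sim]

kv = KeyedVectors.load_word2vec_format("GoogleNews-vectors-negative300.bin", binary=True)
print(sim_features("sam bought a new clock", "the clock runs", kv))
```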
|
{ |
|
"text": "Length features (LEN): This set comprises three features: the length of the context (in tokens), the difference in length between the context and hypothesis, and a binary feature indicating if the hypothesis is longer than the context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Similarity features (SIM):", |
|
"sec_num": null |
|
}, |
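A correspondingly small sketch of the length features, with whitespace tokenization assumed for illustration.

def len_features(context, hypothesis):
    c_len, h_len = len(context.split()), len(hypothesis.split())
    # context length, length difference, and a binary longer-hypothesis indicator
    return [c_len, c_len - h_len, 1.0 if h_len > c_len else 0.0]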
|
{ |
|
"text": "We train and test our regression model on two subsets of the JOCI corpus, which, for brevity, we call \"A\" and \"B.\" \"A\" consists of 2,976 sentence pairs (i.e., context-hypothesis pairs) from SNLI-train annotated with ordinal labels. This corresponds to the three rows labeled SNLI in Table 1 (993 + 988 + 995 = 2, 976 pairs), and can be viewed as a textual entailment dataset re-labeled with ordinal judgments. \"B\" consists of 6,375 context-inference pairs, in which the contexts are the same 2,976 SNLI-train premises as \"A\", and the hypotheses are generated based on world knowledge ( \u00a74.1.1); these pairs are also annotated with ordinal labels. This corresponds to a subset of the row labeled AGCI in Table 1 . A key difference between \"A\" and \"B\" is that the hypotheses in \"A\" are human-elicited, while those in \"B\" are auto-generated; we are interested in seeing whether this affects the task's difficulty. 23 Tables 4 and 5 show each model's performance (mean squared error and Spearman's \u03c1, respectively) in predicting ordinal labels. 24 We compare our ordinal regression model g \u03b8 (\u2022) with these baselines:", |
|
"cite_spans": [ |
|
{ |
|
"start": 1041, |
|
"end": 1043, |
|
"text": "24", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 283, |
|
"end": 290, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 703, |
|
"end": 710, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 914, |
|
"end": 928, |
|
"text": "Tables 4 and 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "6.1" |
|
}, |
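A minimal sketch of the two evaluation measures reported in Tables 4 and 5, assuming scipy is available; gold and pred are illustrative arrays of ordinal labels.

import numpy as np
from scipy.stats import spearmanr

def evaluate(gold, pred):
    gold = np.asarray(gold, dtype=float)
    pred = np.asarray(pred, dtype=float)
    mse = float(np.mean((gold - pred) ** 2))   # mean squared error
    rho = spearmanr(gold, pred).correlation    # Spearman's rank correlation
    return mse, rho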
|
{ |
|
"text": "Most Frequent: Select the ordinal class appearing most often in train.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Frequency Sampling: Select an ordinal label according to their distribution in train.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rounded Average: Average over all labels from train rounded to nearest ordinal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "One-vs-All: Train one SVM classifier per ordinal class and select the class label with the largest corresponding margin. We train this model with the same set of features as the ordinal regression model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
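The three label-only baselines can be sketched as follows (not the authors' code); the One-vs-All SVM baseline is omitted here since it reuses the same feature matrix as the regression model.

import numpy as np
from collections import Counter

def most_frequent(train_labels, n_test):
    # predict the single most common ordinal class from train
    label = Counter(train_labels).most_common(1)[0][0]
    return [label] * n_test

def frequency_sampling(train_labels, n_test, seed=0):
    # sample labels in proportion to their frequency in train
    rng = np.random.default_rng(seed)
    labels, counts = np.unique(train_labels, return_counts=True)
    return list(rng.choice(labels, size=n_test, p=counts / counts.sum()))

def rounded_average(train_labels, n_test):
    # predict the train-set mean label, rounded to the nearest ordinal
    return [int(round(float(np.mean(train_labels))))] * n_test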
|
{ |
|
"text": "Overall, the regression model achieves the lowest MSE and highest \u03c1, implying that this dataset is learnable and tractable. Naturally, we would desire a model that achieves MSE under 1.0, and we hope that the release of our dataset will encourage more concerted effort in this common-sense inference task. Importantly, note that performance on A-test is better than on B-test. We believe \"B\" is a more challenging dataset because auto-generation of hypothesis leads to wider variety than elicitation. We also run a feature ablation test. Table 6 shows that the most useful features differ for Atest and B-test. On A-test, where the inferences are elicited from humans, removal of similarity-and bow-based features together results in the largest performance drop. On B-test, by contrast, removing similarity and bow features results in a com-parable performance drop to removing seq2seq features. These observations point to statistical differences between human-elicited and auto-generated hypotheses, a motivating point of the JOCI corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 538, |
|
"end": 545, |
|
"text": "Table 6", |
|
"ref_id": "TABREF11" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In motivating the need for automatically building collections of common-sense knowledge, Clark et al. (2003) wrote:", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 108, |
|
"text": "Clark et al. (2003)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "\"China launched a meteorological satellite into orbit Wednesday.\" suggests to a human reader that (among other things) there was a rocket launch; China probably owns the satellite; the satellite is for monitoring weather; the orbit is around Earth; etc The use of \"etc\" summarizes an infinite number of other statements that a human reader would find to be very likely, likely, technically plausible, or impossible, given the provided context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Preferably we could build systems that would automatically learn common-sense exclusively from available corpora; extracting not just statements about what is possible, but also the associated probabilities of how likely certain things are to obtain in any given context. We are unaware of existing work that has demonstrated this to be feasible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We have thus described a multi-stage approach to common-sense textual inference: we first extract large numbers of possible statements from a corpus, and use those statements to generate contextually grounded context-hypothesis pairs. These are presented to humans for direct assessment of subjective likelihood, rather than relying on corpus data alone. As the data is automatically generated, we seek to bypass issues in human elicitation bias. Further, since subjective likelihood judgments are not difficult for humans, our crowdsourcing technique is both inexpensive and scalable.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Future work will extend our techniques for forward inference generation, further scale up the annotation of additional examples, and explore the use of larger, more complex contexts. The resulting JOCI corpus will be used to improve algorithms for natural language inference tasks such as storytelling and story understanding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "McCarthy (1959): a program has common sense if it automatically deduces for itself a sufficiently wide class of immediate consequences of anything it is told and what it already knows.3 Many of the bridging inferences ofClark (1975) make use of common-sense knowledge, such as the following example of \"Probable part\": I walked into the room. The windows looked out to the bay. To resolve the definite reference the windows, one needs to know that rooms have windows is probable.379Transactions of the Association for Computational Linguistics, vol. 5, pp. 379-395, 2017. Action Editor: Mark Steedman.Submission batch: 12/2016; Revision batch: 3/2017; Published 11/2017. c 2017 Association for Computational Linguistics. Distributed under a CC-BY 4.0 license.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For further background see discussions by Van Durme (2010), Gordon and Van Durme (2013),Rudinger et al. (2015) andMisra et al. (2016).5 The JOCI corpus is released freely at: http://decomp. net/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "McRae et al. (2005): Features such as <is larger than a tulip> or <moves faster than an infant>, for example; although logically possible, do not occur in [human responses] [...] Although people are capable of verifying that a <dog is larger than a pencil>.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This notion of thematic plausibility is then related to the notion of verb-argument selectional preference(Zernik, 1992;Resnik, 1993;Clark and Weir, 1999), and sortal (in)correctness(Thomason, 1972).8 Thanks to the anonymous reviewer for this connection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Epistemic modality: the likelihood that (some aspect of) a certain state of affairs is/has been/will be true (or false) in the context of the possible world under consideration.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\"Contradictories\": exhaustive and mutually exclusive conditions. \"Contraries\": non-exhaustive and mutually exclusive. \"Subcontraries\": exhaustive and non-mutually exclusive.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/hltcoe/PredPatt", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Senses sharing a hypernym with each other are called cohyponyms (e.g., book.n.01, magazine.n.01 and collections.n.02 are co-hyponyms of publication.n.01).16 We use the pattern.en module (http://www.clips. ua.ac.be/pages/pattern-en) for verbalization, which includes determining plurality of the argument, adding proper articles, and conjugating verbs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "See Vinyals et al. (2015) for full details.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\"Not making sense\" means that inferences that are incomplete sentences or grammatically wrong.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "If subsequent sentences in a story were always very likely, then those would be boring tales; the reader could infer the conclusion based on the introduction. While at the same time if most subsequent sentences were only technically possible, the reader would give up in confusion.20 Specifically, we treat premises as contexts and effect alternatives as possible hypotheses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The GoogleNews embeddings are available at: https: //code.google.com/archive/p/word2vec/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Details of the data split is reported in the dataset release. 24 MSE and Spearman's \u03c1 are both commonly used evalua-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "tions in ordinal prediction tasks(Baccianella et al., 2009;Bennett and Lanning, 2007;Gaudette and Japkowicz, 2009;Agresti and Kateri, 2011;Popescu and Dinu, 2009;Liu et al., 2015;Gella et al., 2013).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Thank you to action editor Mark Steedman and the anonymous reviewers for their feedback, as well as colleagues including Lenhart Schubert, Kyle Rawlins, Aaron White, and Keisuke Sakaguchi. This work was supported in part by DARPA LORELEI, the National Science Foundation Graduate Research Fellowship and the JHU Human Language Technology Center of Excellence (HLTCOE).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Semeval-2012 task 6: A pilot on semantic textual similarity", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Gonzalez-Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the First Joint Conference on Lexical and Computational Semantics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "385--393", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Mona Diab, Daniel Cer, and Aitor Gonzalez-Agirre. 2012. Semeval-2012 task 6: A pilot on semantic textual similarity. In Proceedings of the First Joint Conference on Lexical and Computational Semantics-Volume 1: Proceedings of the Main Confer- ence and the Shared Task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Eval- uation, pages 385-393. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Categorical data analysis", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Agresti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Kateri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "International encyclopedia of statistical science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "206--208", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Agresti and Maria Kateri. 2011. Categorical data analysis. In International encyclopedia of statistical science, pages 206-208. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Evaluation measures for ordinal regression", |
|
"authors": [ |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Baccianella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Esuli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Ninth International Conference on Intelligent Systems Design and Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "283--287", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefano Baccianella, Andrea Esuli, and Fabrizio Sebas- tiani. 2009. Evaluation measures for ordinal regres- sion. In 2009 Ninth International Conference on In- telligent Systems Design and Applications, pages 283- 287. Institute of Electrical and Electronics Engineers.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.0473v7" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473v7.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The Berkeley FrameNet Project", |
|
"authors": [ |
|
{ |
|
"first": "Collin", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Baker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Fillmore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Lowe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and 17th International Conference on Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "86--90", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Collin F. Baker, Charles J. Fillmore, and John B. Lowe. 1998. The Berkeley FrameNet Project. In Proceed- ings of the 36th Annual Meeting of the Association for Computational Linguistics and 17th International Conference on Computational Linguistics, Volume 1, pages 86-90. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Strategies for lifelong knowledge extraction from the web", |
|
"authors": [ |
|
{ |
|
"first": "Michele", |
|
"middle": [], |
|
"last": "Banko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 4th International Conference on Knowledge Capture", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "95--102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michele Banko and Oren Etzioni. 2007. Strategies for lifelong knowledge extraction from the web. In Proceedings of the 4th International Conference on Knowledge Capture, pages 95-102. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Representing meaning with a combination of logical and distributional models", |
|
"authors": [ |
|
{ |
|
"first": "Islam", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pengxiang", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Erk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mooney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Islam Beltagy, Stephen Roller, Pengxiang Cheng, Katrin Erk, and Raymond J. Mooney. 2017. Representing meaning with a combination of logical and distribu- tional models. Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The Netflix prize", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bennett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stan", |
|
"middle": [], |
|
"last": "Lanning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of KDD Cup and Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Bennett and Stan Lanning. 2007. The Netflix prize. In Proceedings of KDD Cup and Workshop, page 35.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Global learning of typed entailment rules", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Goldberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "610--619", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Berant, Ido Dagan, and Jacob Goldberger. 2011. Global learning of typed entailment rules. In Proceedings of the 49th Annual Meeting of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 610-619. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A large annotated corpus for learning natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Potts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "632--642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 632- 642. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Unsupervised learning of narrative event chains", |
|
"authors": [ |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL-08: HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "789--797", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nathanael Chambers and Dan Jurafsky. 2008. Unsuper- vised learning of narrative event chains. In Proceed- ings of ACL-08: HLT, pages 789-797. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Learner: A System for Acquiring Commonsense Knowledge by Analogy", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Chklovski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of Second International Conference on Knowledge Capture", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Chklovski. 2003. Learner: A System for Ac- quiring Commonsense Knowledge by Analogy. In Proceedings of Second International Conference on Knowledge Capture (K-CAP 2003).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "An iterative approach to estimating frequencies over a semantic hierarchy", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Weir", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of the Joint SIGDAT Conference on Empirical Methods in Natural Language Processing and Very Large Corpora", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Clark and David Weir. 1999. An iterative ap- proach to estimating frequencies over a semantic hier- archy. In Proceedings of the Joint SIGDAT Conference on Empirical Methods in Natural Language Process- ing and Very Large Corpora. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A knowledge-driven approach to text meaning processing", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Harrison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Thompson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies Workshop on Text Meaning", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Clark, Phil Harrison, and John Thompson. 2003. A knowledge-driven approach to text meaning process- ing. In Proceedings of the North American Chap- ter of the Association for Computational Linguis- tics: Human Language Technologies Workshop on Text Meaning-Volume 9, pages 1-6. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Bridging", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Herbert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1975, |
|
"venue": "Theoretical issues in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Herbert H. Clark. 1975. Bridging. In R. C. Schank and B. L. Nash-Webber, editors, Theoretical issues in nat- ural language processing. Association for Computing Machinery, New York.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Using the framework", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Cooper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dick", |
|
"middle": [], |
|
"last": "Crouch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Van Eijck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Fox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Van Genabith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Jaspars", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hans", |
|
"middle": [], |
|
"last": "Kamp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Milward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Pulman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Cooper, Dick Crouch, Jan Van Eijck, Chris Fox, Johan Van Genabith, Jan Jaspars, Hans Kamp, David Milward, Manfred Pinkal, Massimo Poesio, and Steve Pulman. 1996. Using the framework. Technical re- port, Technical Report LRE 62-051 D-16, The FraCaS Consortium.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "The Pascal recognising textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Ido Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Glickman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Machine learning challenges: evaluating predictive uncertainty, visual object classification, and recognising textual entailment", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2006. The Pascal recognising textual entailment chal- lenge. In Machine learning challenges: evaluating predictive uncertainty, visual object classification, and recognising textual entailment.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "The Interaction of Modality and Negation: A Typological Study. A Garland Series", |
|
"authors": [ |
|
{ |
|
"first": "Ferdinand", |
|
"middle": [], |
|
"last": "De Haan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ferdinand de Haan. 1997. The Interaction of Modality and Negation: A Typological Study. A Garland Series. Garland Pub.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Universal stanford dependencies: A cross-linguistic typology", |
|
"authors": [ |
|
{ |
|
"first": "Marie-Catherine", |
|
"middle": [], |
|
"last": "De Marneffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Dozat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Silveira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katri", |
|
"middle": [], |
|
"last": "Haverinen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Ginter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4585--4592", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marie-Catherine de Marneffe, Timothy Dozat, Natalia Silveira, Katri Haverinen, Filip Ginter, Joakim Nivre, and Christopher D. Manning. 2014. Universal stan- ford dependencies: A cross-linguistic typology. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14), pages 4585-4592. European Language Resources As- sociation (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A flexible, corpus-driven model of regular and inverse selectional preferences", |
|
"authors": [ |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Erk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulrike", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Computational Linguistics", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "723--763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katrin Erk, Sebastian Pad\u00f3, and Ulrike Pad\u00f3. 2010. A flexible, corpus-driven model of regular and inverse selectional preferences. Computational Linguistics, 36(4):723-763.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Concretely Annotated Corpora", |
|
"authors": [ |
|
{ |
|
"first": "Francis", |
|
"middle": [], |
|
"last": "Ferraro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Gormley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Travis", |
|
"middle": [], |
|
"last": "Wolfe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Craig", |
|
"middle": [], |
|
"last": "Harman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "4th Workshop on Automated Knowledge Base Construction (AKBC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francis Ferraro, Max Thomas, Matthew R. Gormley, Travis Wolfe, Craig Harman, and Benjamin Van Durme. 2014. Concretely Annotated Corpora. In 4th Workshop on Automated Knowledge Base Construc- tion (AKBC).", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Project Halo: Towards a digital aristotle", |
|
"authors": [ |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Friedland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Allen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gavin", |
|
"middle": [], |
|
"last": "Matthews", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Witbrock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Baxter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Curtis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Blake", |
|
"middle": [], |
|
"last": "Shepard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierluigi", |
|
"middle": [], |
|
"last": "Miraglia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jurgen", |
|
"middle": [], |
|
"last": "Angele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steffen", |
|
"middle": [], |
|
"last": "Staab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "AI magazine", |
|
"volume": "25", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Noah S. Friedland, Paul G. Allen, Gavin Matthews, Michael Witbrock, David Baxter, Jon Curtis, Blake Shepard, Pierluigi Miraglia, Jurgen Angele, Steffen Staab, et al. 2004. Project Halo: Towards a digital aristotle. AI magazine, 25(4):29.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Integrating logical representations with probabilistic information using Markov logic", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Garrette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Erk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [], |
|
"last": "Mooney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Ninth International Conference on Computational Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "105--114", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Garrette, Katrin Erk, and Raymond Mooney. 2011. Integrating logical representations with probabilistic information using Markov logic. In Proceedings of the Ninth International Conference on Computational Semantics, pages 105-114. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Evaluation methods for ordinal classification", |
|
"authors": [ |
|
{ |
|
"first": "Lisa", |
|
"middle": [], |
|
"last": "Gaudette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathalie", |
|
"middle": [], |
|
"last": "Japkowicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 22nd Canadian Conference on Artificial Intelligence: Advances in Artificial Intelligence, Canadian AI '09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "207--210", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lisa Gaudette and Nathalie Japkowicz. 2009. Evalua- tion methods for ordinal classification. In Proceedings of the 22nd Canadian Conference on Artificial Intel- ligence: Advances in Artificial Intelligence, Canadian AI '09, pages 207-210. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Unsupervised word usage similarity in social media texts", |
|
"authors": [ |
|
{ |
|
"first": "Spandana", |
|
"middle": [], |
|
"last": "Gella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Main Conference and the Shared Task: Semantic Textual Similarity", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "248--253", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Spandana Gella, Paul Cook, and Bo Han. 2013. Unsu- pervised word usage similarity in social media texts. In Second Joint Conference on Lexical and Computa- tional Semantics (*SEM), Volume 1: Proceedings of the Main Conference and the Shared Task: Seman- tic Textual Similarity, pages 248-253. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "The fourth Pascal recognizing textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Giampiccolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"Trang" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Cabrio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Text Analysis Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danilo Giampiccolo, Hoa Trang Dang, Bernardo Magnini, Ido Dagan, Elena Cabrio, and Bill Dolan. 2008. The fourth Pascal recognizing textual entail- ment challenge. In Proceedings of the Text Analysis Conference (TAC) 2008.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Reporting bias and knowledge extraction", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Gordon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Automated Knowledge Base Construction (AKBC): The 3rd Workshop on Knowledge Extraction at the ACM Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Gordon and Benjamin Van Durme. 2013. Re- porting bias and knowledge extraction. In Automated Knowledge Base Construction (AKBC): The 3rd Work- shop on Knowledge Extraction at the ACM Conference on Information and Knowledge Management.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "ConceptNet 3: a Flexible, Multilingual Semantic Network for Common Sense Knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Catherine", |
|
"middle": [], |
|
"last": "Havasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Speer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Alonso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Catherine Havasi, Robert Speer, and Jason Alonso. 2007. ConceptNet 3: a Flexible, Multilingual Semantic Net- work for Common Sense Knowledge. In Proceedings of Recent Advances in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Automatic acquisition of hyponyms from large text corpora", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Marti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hearst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Proceedings of the 14th Conference on Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "539--545", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marti A. Hearst. 1992. Automatic acquisition of hy- ponyms from large text corpora. In Proceedings of the 14th Conference on Computational Linguistics- Volume 2, pages 539-545. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Methodology for knowledge acquisition", |
|
"authors": [ |
|
{ |
|
"first": "Jerry", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Hobbs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Costanza", |
|
"middle": [], |
|
"last": "Navarretta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jerry R. Hobbs and Costanza Navarretta. 1993. Methodology for knowledge acquisition (unpublished manuscript). http://www.isi.edu/\u02dchobbs/damage.text.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "World knowledge and word meaning", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Jerry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hobbs", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1987, |
|
"venue": "Proceedings of the 1987 Workshop on Theoretical Issues in Natural Language Processing, TINLAP '87", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "20--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jerry R. Hobbs. 1987. World knowledge and word mean- ing. In Proceedings of the 1987 Workshop on Theoret- ical Issues in Natural Language Processing, TINLAP '87, pages 20-27. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735- 1780.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "A Natural History of Negation. David Hume series", |
|
"authors": [ |
|
{ |
|
"first": "Laurence", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Horn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laurence R. Horn. 1989. A Natural History of Negation. David Hume series. CSLI Publications.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "CYC: A large-scale investment in knowledge infrastructure", |
|
"authors": [ |
|
{ |
|
"first": "Douglas", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Lenat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Communications of the ACM", |
|
"volume": "38", |
|
"issue": "11", |
|
"pages": "33--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douglas B. Lenat. 1995. CYC: A large-scale investment in knowledge infrastructure. Communications of the ACM, 38(11):33-38.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "The Winograd schema challenge", |
|
"authors": [ |
|
{ |
|
"first": "Hector", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Levesque", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ernest", |
|
"middle": [], |
|
"last": "Davis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leora", |
|
"middle": [], |
|
"last": "Morgenstern", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "AAAI Spring Symposium: Logical Formalizations of Commonsense Reasoning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hector J. Levesque, Ernest Davis, and Leora Morgen- stern. 2011. The Winograd schema challenge. In AAAI Spring Symposium: Logical Formalizations of Commonsense Reasoning.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "From trees to predicate-argument structures", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Liakata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Pulman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 19th International Conference on Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Liakata and Stephen Pulman. 2002. From trees to predicate-argument structures. In Proceed- ings of the 19th International Conference on Compu- tational Linguistics-Volume 1, pages 1-7. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "DIRT -Discovery of Inference Rules from Text", |
|
"authors": [ |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Pantel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the seventh ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "323--328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekang Lin and Patrick Pantel. 2001. DIRT -Discovery of Inference Rules from Text. In Proceedings of the seventh ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 323- 328. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Learning semantic word embeddings based on ordinal knowledge constraints", |
|
"authors": [ |
|
{ |
|
"first": "Quan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Si", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhen-Hua", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (ACL-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1501--1511", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Quan Liu, Hui Jiang, Si Wei, Zhen-Hua Ling, and Yu Hu. 2015. Learning semantic word embeddings based on ordinal knowledge constraints. In Proceedings of the 53rd Annual Meeting of the Association for Compu- tational Linguistics and the 7th International Joint Conference on Natural Language Processing (ACL- IJCNLP), pages 1501-1511.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Semantics", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lyons", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lyons. 1977. Semantics. Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Natural logic for textual inference", |
|
"authors": [ |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Maccartney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "193--200", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bill MacCartney and Christopher D. Manning. 2007. Natural logic for textual inference. In Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pages 193-200. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "A SICK cure for the evaluation of compositional distributional semantic models", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Marelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Menini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raffaella", |
|
"middle": [], |
|
"last": "Bernardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Zamparelli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "216--223", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zampar- elli. 2014. A SICK cure for the evaluation of composi- tional distributional semantic models. In Proceedings of the Ninth International Conference on Language Resources and Evaluation, pages 216-223.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Programs with common sense", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Mccarthy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1959, |
|
"venue": "Proceedings of the Teddington Conference on the Mechanization of Thought Processes", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John McCarthy. 1959. Programs with common sense. In Proceedings of the Teddington Conference on the Mechanization of Thought Processes, London: Her Majesty's Stationery Office.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Modeling the influence of thematic fit (and other constraints) in on-line sentence comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Ken", |
|
"middle": [], |
|
"last": "Mcrae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Spivey-Knowlton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Tanenhaus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Journal Of Memory and Language", |
|
"volume": "38", |
|
"issue": "", |
|
"pages": "283--312", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ken McRae, Michael J. Spivey-Knowlton, and Michael K. Tanenhaus. 1998. Modeling the in- fluence of thematic fit (and other constraints) in on-line sentence comprehension. Journal Of Memory and Language, 38:283-312.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Semantic feature production norms for a large set of living and nonliving things", |
|
"authors": [ |
|
{ |
|
"first": "Ken", |
|
"middle": [], |
|
"last": "Mcrae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Cree", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Seidenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Mcnorgan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Behavior Research Methods, Instruments, & Computers", |
|
"volume": "37", |
|
"issue": "4", |
|
"pages": "547--559", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ken McRae, George S. Cree, Mark S. Seidenberg, and Chris McNorgan. 2005. Semantic feature production norms for a large set of living and nonliving things. Behavior Research Methods, Instruments, & Comput- ers, 37(4):547-559.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "WordNet: a lexical database for English", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "George", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Communications of the ACM", |
|
"volume": "38", |
|
"issue": "11", |
|
"pages": "39--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George A. Miller.1995. WordNet: a lexical database for English. Communications of the ACM, 38(11):39-41.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Seeing through the human reporting bias: Visual classifiers from noisy humancentric labels", |
|
"authors": [ |
|
{ |
|
"first": "Ishan", |
|
"middle": [], |
|
"last": "Misra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"Lawrence" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ross", |
|
"middle": [], |
|
"last": "Girshick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2930--2939", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ishan Misra, C. Lawrence Zitnick, Margaret Mitchell, and Ross Girshick. 2016. Seeing through the human reporting bias: Visual classifiers from noisy human- centric labels. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2930-2939.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "A corpus and Cloze evaluation for deeper understanding of commonsense stories", |
|
"authors": [ |
|
{ |
|
"first": "Nasrin", |
|
"middle": [], |
|
"last": "Mostafazadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Vanderwende", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushmeet", |
|
"middle": [], |
|
"last": "Kohli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Allen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "839--849", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. 2016. A corpus and Cloze evaluation for deeper understanding of com- monsense stories. In Proceedings of the 2016 Confer- ence of the North American Chapter of the Associa- tion for Computational Linguistics: Human Language Technologies, pages 839-849. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "What you seek is what you get: Extraction of class attributes from query logs", |
|
"authors": [ |
|
{ |
|
"first": "Marius", |
|
"middle": [], |
|
"last": "Pa\u015fca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 20th International Joint Conference on Artifical Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marius Pa\u015fca and Benjamin Van Durme. 2007. What you seek is what you get: Extraction of class attributes from query logs. In Proceedings of the 20th Interna- tional Joint Conference on Artifical Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "A probabilistic model of semantic plausibility in sentence processing", |
|
"authors": [ |
|
{ |
|
"first": "Ulrike", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Crocker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Keller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Cognitive Science", |
|
"volume": "33", |
|
"issue": "5", |
|
"pages": "795--838", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ulrike Pad\u00f3, Matthew W. Crocker, Frank Keller, and Matthew W. Crocker. 2009. A probabilistic model of semantic plausibility in sentence processing. Cog- nitive Science, 33(5):795-838.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "ISP: Learning inferential selectional preferences", |
|
"authors": [ |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Pantel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Bhagat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonaventura", |
|
"middle": [], |
|
"last": "Coppola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Chklovski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of Human Language Technologies: The Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "564--571", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Patrick Pantel, Rahul Bhagat, Bonaventura Coppola, Timothy Chklovski, and Eduard H. Hovy. 2007. ISP: Learning inferential selectional preferences. In Pro- ceedings of Human Language Technologies: The An- nual Conference of the North American Chapter of the Association for Computational Linguistics, pages 564-571.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "The LAMBADA dataset: Word prediction requiring a broad discourse context", |
|
"authors": [ |
|
{ |
|
"first": "Denis", |
|
"middle": [], |
|
"last": "Paperno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Germ\u00e1n", |
|
"middle": [], |
|
"last": "Kruszewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angeliki", |
|
"middle": [], |
|
"last": "Lazaridou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngoc", |
|
"middle": [ |
|
"Quan" |
|
], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raffaella", |
|
"middle": [], |
|
"last": "Bernardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandro", |
|
"middle": [], |
|
"last": "Pezzelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gemma", |
|
"middle": [], |
|
"last": "Boleda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Fernandez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1525--1534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Denis Paperno, Germ\u00e1n Kruszewski, Angeliki Lazari- dou, Ngoc Quan Pham, Raffaella Bernardi, Sandro Pezzelle, Marco Baroni, Gemma Boleda, and Raquel Fernandez. 2016. The LAMBADA dataset: Word pre- diction requiring a broad discourse context. In Pro- ceedings of the 54th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 1525-1534. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "English Gigaword Fifth Edition", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Parker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Graff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuaki", |
|
"middle": [], |
|
"last": "Maeda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Parker, David Graff, Junbo Kong, Ke Chen, and Kazuaki Maeda. 2011. English Gigaword Fifth Edi- tion. Linguistic Data Consortium.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Turning web text and search queries into factual knowledge: Hierarchical class attribute extraction", |
|
"authors": [ |
|
{ |
|
"first": "Marius", |
|
"middle": [], |
|
"last": "Pasca", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 23rd National Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marius Pasca. 2008. Turning web text and search queries into factual knowledge: Hierarchical class attribute ex- traction. In Proceedings of the 23rd National Confer- ence on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Feature extraction and supervised learning on fMRI: from practice to theory", |
|
"authors": [ |
|
{ |
|
"first": "Fabian", |
|
"middle": [], |
|
"last": "Pedregosa-Izquierdo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabian Pedregosa-Izquierdo. 2015. Feature extraction and supervised learning on fMRI: from practice to the- ory. Ph.D. thesis, Universit\u00e9 Pierre et Marie Curie- Paris VI.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Learning statistical scripts with LSTM recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Pichotta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mooney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2800--2806", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karl Pichotta and Raymond J. Mooney. 2016. Learn- ing statistical scripts with LSTM recurrent neural net- works. In Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI), pages 2800-2806.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Comparing statistical similarity measures for stylistic multivariate analysis", |
|
"authors": [ |
|
{ |
|
"first": "Marius", |
|
"middle": [], |
|
"last": "Popescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liviu", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Dinu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "349--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marius Popescu and Liviu P. Dinu. 2009. Comparing statistical similarity measures for stylistic multivariate analysis. In Proceedings of Recent Advances in Natu- ral Language Processing, pages 349-354.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Loss functions for preference levels: Regression with discrete ordered labels", |
|
"authors": [ |
|
{ |

"first": "Jason", |

"middle": [ |

"D", |

"M" |

], |

"last": "Rennie", |

"suffix": "" |

}, |

{ |

"first": "Nathan", |

"middle": [], |

"last": "Srebro", |

"suffix": "" |

} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the IJCAI Multidisciplinary Workshop on Advances in Preference Handling", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "180--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason D.M. Rennie and Nathan Srebro. 2005. Loss func- tions for preference levels: Regression with discrete ordered labels. In Proceedings of the IJCAI Multidis- ciplinary Workshop on Advances in Preference Han- dling, pages 180-186.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Semantic classes and syntactic ambiguity", |
|
"authors": [ |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Proceedings of ARPA Workshop on Human Language Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philip Resnik. 1993. Semantic classes and syntactic am- biguity. In Proceedings of ARPA Workshop on Human Language Technology.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "Markov logic networks", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro", |
|
"middle": [], |
|
"last": "Domingos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Machine learning", |
|
"volume": "62", |
|
"issue": "1-2", |
|
"pages": "107--136", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Richardson and Pedro Domingos. 2006. Markov logic networks. Machine learning, 62(1- 2):107-136.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "MindNet: Acquiring and structuring semantic information from text", |
|
"authors": [ |
|
{ |

"first": "Stephen", |

"middle": [ |

"D" |

], |

"last": "Richardson", |

"suffix": "" |

}, |

{ |

"first": "William", |

"middle": [ |

"B" |

], |

"last": "Dolan", |

"suffix": "" |

}, |

{ |

"first": "Lucy", |

"middle": [], |

"last": "Vanderwende", |

"suffix": "" |

} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and 17th International Conference on Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "1098--1102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen D. Richardson, William B. Dolan, and Lucy Van- derwende. 1998. MindNet: Acquiring and structuring semantic information from text. In Proceedings of the 36th Annual Meeting of the Association for Computa- tional Linguistics and 17th International Conference on Computational Linguistics, Volume 2, pages 1098- 1102. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Choice of plausible alternatives: An evaluation of commonsense causal reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Melissa", |
|
"middle": [], |
|
"last": "Roemmele", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Cosmin", |

"middle": [ |

"Adrian" |

], |

"last": "Bejan", |

"suffix": "" |

}, |

{ |

"first": "Andrew", |

"middle": [ |

"S" |

], |

"last": "Gordon", |

"suffix": "" |

} |
|
], |
|
"year": 2011, |
|
"venue": "AAAI Spring Symposium: Logical Formalizations of Commonsense Reasoning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "90--95", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melissa Roemmele, Cosmin Adrian Bejan, and An- drew S. Gordon. 2011. Choice of plausible alterna- tives: An evaluation of commonsense causal reason- ing. In AAAI Spring Symposium: Logical Formaliza- tions of Commonsense Reasoning, pages 90-95.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "Script induction as language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpendre", |
|
"middle": [], |
|
"last": "Rastogi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francis", |
|
"middle": [], |
|
"last": "Ferraro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1681--1686", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachel Rudinger, Pushpendre Rastogi, Francis Ferraro, and Benjamin Van Durme. 2015. Script induction as language modeling. In Proceedings of the 2015 Con- ference on Empirical Methods in Natural Language Processing, pages 1681-1686. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "FactBank: A corpus annotated with event factuality. Language resources and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Roser", |
|
"middle": [], |
|
"last": "Saur\u00ed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Pustejovsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "43", |
|
"issue": "", |
|
"pages": "227--268", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roser Saur\u00ed and James Pustejovsky. 2009. FactBank: A corpus annotated with event factuality. Language resources and evaluation, 43(3):227-268.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "An exploration of semantic features in an unsupervised thematic fit evaluation framework", |
|
"authors": [ |
|
{ |
|
"first": "Asad", |
|
"middle": [], |
|
"last": "Sayeed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Demberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Shkadzko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Italian Journal of Computational Linguistics", |
|
"volume": "", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Asad Sayeed, Vera Demberg, and Pavel Shkadzko. 2015. An exploration of semantic features in an unsupervised thematic fit evaluation framework. Italian Journal of Computational Linguistics, 1(1).", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "Using knowledge to understand", |
|
"authors": [ |
|
{ |

"first": "Roger", |

"middle": [ |

"C" |

], |

"last": "Schank", |

"suffix": "" |

} |
|
], |
|
"year": 1975, |
|
"venue": "TINLAP '75: Proceedings of the 1975 Workshop on Theoretical Issues in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "117--121", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roger C. Schank. 1975. Using knowledge to understand. In TINLAP '75: Proceedings of the 1975 Workshop on Theoretical Issues in Natural Language Processing, pages 117-121.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "Can we derive general world knowledge from texts?", |
|
"authors": [ |
|
{ |
|
"first": "Lenhart", |
|
"middle": [], |
|
"last": "Schubert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the Second International Conference on Human Language Technology Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "94--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lenhart Schubert. 2002. Can we derive general world knowledge from texts? In Proceedings of the Second International Conference on Human Language Tech- nology Research, pages 94-97. Morgan Kaufmann Publishers Inc.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "The public acquisition of commonsense knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Push", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of AAAI Spring Symposium: Acquiring (and Using) Linguistic (and World) Knowledge for Information Access", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Push Singh. 2002. The public acquisition of common- sense knowledge. In Proceedings of AAAI Spring Symposium: Acquiring (and Using) Linguistic (and World) Knowledge for Information Access. AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "Semantic taxonomy induction from heterogenous evidence", |
|
"authors": [ |
|
{ |
|
"first": "Rion", |
|
"middle": [], |
|
"last": "Snow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "801--808", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rion Snow, Daniel Jurafsky, and Andrew Y. Ng. 2006. Semantic taxonomy induction from heterogenous evi- dence. In Proceedings of the 21st International Con- ference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguis- tics, pages 801-808. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "YAGO: A Core of Semantic Knowledge Unifying WordNet and Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Fabian", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Suchanek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gjergji", |
|
"middle": [], |
|
"last": "Kasneci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 16th International Conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabian M. Suchanek, Gjergji Kasneci, and Gerhard Weikum. 2007. YAGO: A Core of Semantic Knowl- edge Unifying WordNet and Wikipedia. In Proceed- ings of the 16th International Conference on World Wide Web, page 697.", |
|
"links": null |
|
}, |
|
"BIBREF69": { |
|
"ref_id": "b69", |
|
"title": "Discourse context and sentence perception", |
|
"authors": [ |
|
{ |

"first": "Michael", |

"middle": [ |

"K" |

], |

"last": "Tanenhaus", |

"suffix": "" |

}, |

{ |

"first": "Mark", |

"middle": [ |

"S" |

], |

"last": "Seidenberg", |

"suffix": "" |

} |
|
], |
|
"year": 1980, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael K. Tanenhaus and Mark S. Seidenberg. 1980. Discourse context and sentence perception. Technical Report 176, Center for the Study of Reading, Illinois University, Urbana.", |
|
"links": null |
|
}, |
|
"BIBREF70": { |
|
"ref_id": "b70", |
|
"title": "A Semantic Theory of Sortal Incorrectness", |
|
"authors": [ |
|
{ |

"first": "Richmond", |

"middle": [ |

"H" |

], |

"last": "Thomason", |

"suffix": "" |

} |
|
], |
|
"year": 1972, |
|
"venue": "Journal of Philosophical Logic", |
|
"volume": "1", |
|
"issue": "2", |
|
"pages": "209--258", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richmond H. Thomason. 1972. A Semantic Theory of Sortal Incorrectness. Journal of Philosophical Logic, 1(2):209-258, May.", |
|
"links": null |
|
}, |
|
"BIBREF71": { |
|
"ref_id": "b71", |
|
"title": "Open knowledge extraction through compositional language processing", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lenhart", |
|
"middle": [], |
|
"last": "Schubert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 2008 Conference on Semantics in Text Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "239--254", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Van Durme and Lenhart Schubert. 2008. Open knowledge extraction through compositional language processing. In Proceedings of the 2008 Conference on Semantics in Text Processing, pages 239-254. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF72": { |
|
"ref_id": "b72", |
|
"title": "Deriving generalized knowledge from corpora using WordNet abstraction", |
|
"authors": [ |
|
{ |

"first": "Benjamin", |

"middle": [], |

"last": "Van Durme", |

"suffix": "" |

}, |

{ |

"first": "Phillip", |

"middle": [], |

"last": "Michalak", |

"suffix": "" |

}, |

{ |

"first": "Lenhart", |

"middle": [], |

"last": "Schubert", |

"suffix": "" |

} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 12th Conference of the European Chapter of the ACL (EACL 2009)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "808--816", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Van Durme, Phillip Michalak, and Lenhart Schubert. 2009. Deriving generalized knowledge from corpora using WordNet abstraction. In Proceed- ings of the 12th Conference of the European Chapter of the ACL (EACL 2009), pages 808-816. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF73": { |
|
"ref_id": "b73", |
|
"title": "Extracting implicit knowledge from text", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "14627--0226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Van Durme. 2010. Extracting implicit knowl- edge from text. Ph.D. thesis, University of Rochester, Department of Computer Science, Rochester, NY 14627-0226.", |
|
"links": null |
|
}, |
|
"BIBREF74": { |
|
"ref_id": "b74", |
|
"title": "Grammar as a foreign language", |
|
"authors": [ |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Koo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "2773--2781", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oriol Vinyals, \u0141ukasz Kaiser, Terry Koo, Slav Petrov, Ilya Sutskever, and Geoffrey Hinton. 2015. Grammar as a foreign language. In C. Cortes, N. D. Lawrence, D. D. Lee, M. Sugiyama, and R. Garnett, editors, Ad- vances in Neural Information Processing Systems 28, pages 2773-2781. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF75": { |
|
"ref_id": "b75", |
|
"title": "Universal decompositional semantics on universal dependencies", |
|
"authors": [ |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Steven White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Drew", |
|
"middle": [], |
|
"last": "Reisinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keisuke", |
|
"middle": [], |
|
"last": "Sakaguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Vieira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Rawlins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1713--1723", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aaron Steven White, Drew Reisinger, Keisuke Sak- aguchi, Tim Vieira, Sheng Zhang, Rachel Rudinger, Kyle Rawlins, and Benjamin Van Durme. 2016. Uni- versal decompositional semantics on universal depen- dencies. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 1713-1723. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF76": { |
|
"ref_id": "b76", |
|
"title": "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alice", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Micah", |
|
"middle": [], |
|
"last": "Hodosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "67--78", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Young, Alice Lai, Micah Hodosh, and Julia Hock- enmaier. 2014. From image descriptions to visual denotations: New similarity metrics for semantic in- ference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78.", |
|
"links": null |
|
}, |
|
"BIBREF77": { |
|
"ref_id": "b77", |
|
"title": "Closed yesterday and closed minds: Asking the right questions of the corpus to distinguish thematic from sentential relations", |
|
"authors": [ |
|
{ |
|
"first": "Uri", |
|
"middle": [], |
|
"last": "Zernik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Proceedings of the 14th Conference on Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "1305--1311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Uri Zernik. 1992. Closed yesterday and closed minds: Asking the right questions of the corpus to distinguish thematic from sentential relations. In Proceedings of the 14th Conference on Computational Linguistics- Volume 4, pages 1305-1311. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF78": { |
|
"ref_id": "b78", |
|
"title": "MT/IE: Cross-lingual open information extraction with neural sequence-to-sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Sheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Duh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "64--70", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sheng Zhang, Kevin Duh, and Benjamin Van Durme. 2017. MT/IE: Cross-lingual open information ex- traction with neural sequence-to-sequence models. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguis- tics: Volume 2, Short Papers, pages 64-70. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Examples of common-sense inference ranging from very likely, likely, plausible, technically possible, to impossible." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "flip comes up heads." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "SO for epistemic modals(Saur\u00ed and Pustejovsky, 2009).10" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Generating common-sense inferences based on general world knowledge." |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Examples of sequence-to-sequence hypothesis generation from single-word and full-sentence inputs." |
|
}, |
|
"FIGREF5": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "The annotation interface, with a drop-down list provides ordinal labels to select." |
|
}, |
|
"FIGREF7": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Comparison of normalized distributions between JOCI and other corpora.qualitatively confirming we can generate and collect annotations of pairs at each ordinal category." |
|
}, |
|
"FIGREF8": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Data growth along averaged \u03ba scores." |
|
}, |
|
"FIGREF9": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Figure 8: COPA heatmap." |
|
}, |
|
"FIGREF10": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "21 LogisticSE: http://github.com/fabianp/mord im po ss ib le te ch -p os sib le pl au sib le lik Distribution of AGCI-WK im po ss ib le te ch -p os sib le pl au sib le lik el y ve ry -li ke ly Label distributions of AGCI." |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "this approach generates common-sense inference candidates in four steps: (a) extracting propositions with predicate-argument structures from texts, (b) abstracting over propositions to generate templates for concepts, (c) deriving properties of concepts via different strategies, and (d) generating possibilistic hypotheses from contexts.John borrowed the books from the library .", |
|
"content": "<table><tr><td>(a) Extraction</td><td/></tr><tr><td>plain text</td><td/></tr><tr><td colspan=\"2\">pred-arg structured proposition</td></tr><tr><td colspan=\"3\">[John] borrowed [the books] from [the library]</td></tr><tr><td>(b) Abstraction</td><td/></tr><tr><td colspan=\"2\">abstracted proposition</td></tr><tr><td colspan=\"3\">[person] borrow [book] from [library]</td></tr><tr><td colspan=\"2\">propositional templates</td></tr><tr><td>person.n.01</td><td colspan=\"2\">____ borrow book from library</td></tr><tr><td>book.n.01</td><td colspan=\"2\">person borrow ____ from library</td></tr><tr><td>library.n.01</td><td colspan=\"2\">person borrow book from ____</td></tr><tr><td colspan=\"3\">(c) Property derivation using the decision tree</td></tr><tr><td/><td>feature</td></tr><tr><td/><td>Yes</td><td>No</td></tr><tr><td>feature</td><td/></tr><tr><td colspan=\"2\">person subscribe to ____</td></tr><tr><td>Yes</td><td>No</td></tr><tr><td/><td>feature</td></tr><tr><td/><td colspan=\"2\">person borrow ____ from library</td></tr><tr><td/><td>Yes</td><td>No</td></tr><tr><td/><td/><td>\u2026</td></tr><tr><td colspan=\"2\">(d) Inference generation</td></tr><tr><td>context</td><td/></tr><tr><td colspan=\"3\">The professor recommended [books] for this course.</td></tr></table>" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table><tr><td>Subset Name</td><td colspan=\"3\"># pairs Context Source Hypothesis Source</td></tr><tr><td/><td>22,086</td><td>SNLI-train</td><td>AGCI-WK</td></tr><tr><td>AGCI against</td><td>2,456 2,362</td><td>SNLI-dev SNLI-test</td><td>AGCI-WK AGCI-WK</td></tr><tr><td>SNLI/ROCStories</td><td>5,002</td><td>ROCStories</td><td>AGCI-WK</td></tr><tr><td/><td>1,211</td><td>SNLI-train</td><td>AGCI-NN</td></tr><tr><td/><td>993</td><td>SNLI-train</td><td>SNLI-entailment</td></tr><tr><td>SNLI</td><td>988</td><td>SNLI-train</td><td>SNLI-neutral</td></tr><tr><td/><td>995</td><td>SNLI-train</td><td>SNLI-contradiction</td></tr><tr><td>ROCStories</td><td colspan=\"2\">1,000 ROCStories-1st 1,000 ROCStories-1st</td><td>ROCStories-2nd ROCStories-3rd</td></tr><tr><td>COPA</td><td colspan=\"2\">1,000 COPA-premise</td><td>COPA-effect</td></tr><tr><td>Total</td><td>39,093</td><td>-</td><td>-</td></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "JOCI corpus statistics, where each subset consists of different sources for context-and-hypothesis pairs, each annotated with common-sense ordinal labels. AGCI-WK represents candidates generated based on world knowledge. AGCI-NN represents candidates generated via neural methods.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Kelly was playing a soccer match for her University. The University is dismantled.", |
|
"content": "<table><tr><td colspan=\"3\">M. Labels 5 [5,5,5] John was excited to go to the fair. Context</td><td>Hypothesis The fair opens.</td></tr><tr><td>4</td><td colspan=\"2\">[4,4,3] Today my water heater broke.</td><td>A person looks for a heater.</td></tr><tr><td>3</td><td colspan=\"2\">[3,3,4] John 's goal was to learn how to draw well.</td><td>A person accomplishes the goal.</td></tr><tr><td colspan=\"2\">2 [2,2,2] 1 [1,1,1]</td><td>A brown-haired lady dressed all in blue denim sits in a group of pigeons.</td><td>People are made of the denim.</td></tr><tr><td>5 4</td><td colspan=\"2\">[5,5,4] [4,4,3] A group of people have an outside cookout. Two females are playing rugby on a field, one with a blue uniform and one with a white uniform.</td><td>Two females are play sports outside. People are having conversations.</td></tr><tr><td>3</td><td colspan=\"2\">[3,3,3] Two dogs fighting, one is black, the other beige.</td><td>The dogs are playing.</td></tr><tr><td>2</td><td>[2,2,3]</td><td>A bare headed man wearing a dark blue cassock, sandals, and dark blue socks mounts the stone steps leading into a weathered old building.</td><td>A man is in the middle of home building.</td></tr><tr><td>1</td><td>[1,1,1]</td><td>A skydiver hangs from the undercarriage of an air-plane or some sort of air gliding device.</td><td>A camera is using an object.</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Examples of context-and-hypothesis pairs with ordinal judgments and Median value.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Statistics of the crowdsourced efforts.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table><tr><td>contains pairs randomly sampled from this subset,</td></tr></table>" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Mean squared error.", |
|
"content": "<table><tr><td>Model Regression: g \u03b8 (\u2022) Most Frequent Freq. Sampling Rounded Average One-vs-All</td><td>A-train A-test B-train B-test .39* .40* .32* .27* .00* .00* .00* .00* .03 .10 .01 .01 .00* .00* .00* .00* .31* .30* .28* .24*</td></tr></table>" |
|
}, |
|
"TABREF9": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Spearman's \u03c1. (*p-value<.01)", |
|
"content": "<table/>" |
|
}, |
|
"TABREF11": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Ablation results for ordinal regression model on A-test and B-test. (*p-value<.01 for \u03c1)", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |