|
{ |
|
"paper_id": "S19-1027", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:45:36.620764Z" |
|
}, |
|
"title": "HELP: A Dataset for Identifying Shortcomings of Neural Models in Monotonicity Reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Hitomi", |
|
"middle": [], |
|
"last": "Yanaka", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ochanomizu University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Koji", |
|
"middle": [], |
|
"last": "Mineshima", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ochanomizu University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Bekki", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ochanomizu University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Inui", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tohoku University", |
|
"location": { |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Sekine", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Lasha", |
|
"middle": [], |
|
"last": "Abzianidze", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Groningen", |
|
"location": { |
|
"country": "Netherlands" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Bos", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Groningen", |
|
"location": { |
|
"country": "Netherlands" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Large crowdsourced datasets are widely used for training and evaluating neural models on natural language inference (NLI). Despite these efforts, neural models have a hard time capturing logical inferences, including those licensed by phrase replacements, socalled monotonicity reasoning. Since no large dataset has been developed for monotonicity reasoning, it is still unclear whether the main obstacle is the size of datasets or the model architectures themselves. To investigate this issue, we introduce a new dataset, called HELP, for handling entailments with lexical and logical phenomena. We add it to training data for the state-of-the-art neural models and evaluate them on test sets for monotonicity phenomena. The results showed that our data augmentation improved the overall accuracy. We also find that the improvement is better on monotonicity inferences with lexical replacements than on downward inferences with disjunction and modification. This suggests that some types of inferences can be improved by our data augmentation while others are immune to it.", |
|
"pdf_parse": { |
|
"paper_id": "S19-1027", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Large crowdsourced datasets are widely used for training and evaluating neural models on natural language inference (NLI). Despite these efforts, neural models have a hard time capturing logical inferences, including those licensed by phrase replacements, socalled monotonicity reasoning. Since no large dataset has been developed for monotonicity reasoning, it is still unclear whether the main obstacle is the size of datasets or the model architectures themselves. To investigate this issue, we introduce a new dataset, called HELP, for handling entailments with lexical and logical phenomena. We add it to training data for the state-of-the-art neural models and evaluate them on test sets for monotonicity phenomena. The results showed that our data augmentation improved the overall accuracy. We also find that the improvement is better on monotonicity inferences with lexical replacements than on downward inferences with disjunction and modification. This suggests that some types of inferences can be improved by our data augmentation while others are immune to it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Natural language inference (NLI) has been proposed as a benchmark task for natural language understanding. This task is to determine whether a given statement (premise) semantically entails another statement (hypothesis) (Dagan et al., 2013) . Large crowdsourced datasets such as SNLI (Bowman et al., 2015a) and MultiNLI (Williams et al., 2018) have been created from naturally-occurring texts for training and testing neural models on NLI. Recent reports showed that these crowdsourced datasets contain undesired biases that allow prediction of entailment labels only from hypothesis sentences (Gururangan et al., 2018; Poliak et al., 2018b; Tsuchiya, 2018) . Moreover, these standard datasets come with the so-called", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 241, |
|
"text": "(Dagan et al., 2013)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 307, |
|
"text": "(Bowman et al., 2015a)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 344, |
|
"text": "(Williams et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 620, |
|
"text": "(Gururangan et al., 2018;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 621, |
|
"end": 642, |
|
"text": "Poliak et al., 2018b;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 643, |
|
"end": 658, |
|
"text": "Tsuchiya, 2018)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Some changes in personal values are simply part of growing older (MultiNLI) \u21d2 Some changes in values are a part of growing older Downward At most ten commissioners spend time at home (FraCaS)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Upward", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u21d2 At most ten female commissioners spend time at home upward monotonicity inferences (see Table 1 ), i.e., inferences from subsets to supersets (changes in personal values changes in values), but they rarely come with downward monotonicity inferences, i.e., inferences from supersets to subsets (commissioners female commissioners). Downward monotonicity inferences are interesting in that they allow to replace a phrase with a more specific one and thus the resulting sentence can become longer, yet the inference is valid. FraCaS (Cooper et al., 1994) contains such logically challenging problems as downward inferences. However, it is small in size (only 346 examples) for training neural models, and it covers only simple syntactic patterns with severely restricted vocabularies. The lack of such a dataset on a large scale is due to at least two factors: it is hard to instruct crowd workers without deep knowledge of natural language syntax and semantics, and it is also unfeasible to employ experts to obtain a large number of logically challenging inferences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 532, |
|
"end": 553, |
|
"text": "(Cooper et al., 1994)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 97, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Upward", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Bowman et al. (2015b) proposed an artificial dataset for logical reasoning, whose premise and hypothesis are automatically generated from a simple English-like grammar. Following this line of work, Geiger et al. (2018) presented a method to construct a complex dataset for multiple quantifiers (e.g., Every dwarf licks no rifle \u21d2 No ugly dwarf licks some rifle). These datasets contain downward inferences, but they are designed not to require lexical knowledge. There are also NLI datasets which expand lexical knowledge by replacing words using lexical rules (Monz and de Rijke, 2001; Glockner et al., 2018; Naik et al., 2018; Poliak et al., 2018a) . In these works, however, little attention has been paid to downward inferences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 218, |
|
"text": "Geiger et al. (2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 561, |
|
"end": 586, |
|
"text": "(Monz and de Rijke, 2001;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 587, |
|
"end": 609, |
|
"text": "Glockner et al., 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 610, |
|
"end": 628, |
|
"text": "Naik et al., 2018;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 629, |
|
"end": 650, |
|
"text": "Poliak et al., 2018a)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Upward", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The GLUE leaderboard (Wang et al., 2019) reported that neural models did not perform well on downward inferences, and this leaves us guessing whether the lack of large datasets for such kind of inferences that involve the interaction between lexical and logical inferences is an obstacle of understanding inferences for neural models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 40, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Upward", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To shed light on this problem, this paper makes the following three contributions: (a) providing a method to create a large NLI dataset 1 that embodies the combination of lexical and logical inferences focusing on monotonicity (i.e., phrase replacement-based reasoning) (Section 3), (b) measuring to what extent the new dataset helps neural models to learn monotonicity inferences, and (c) by analyzing the results, revealing which types of logical inferences are solved with our training data augmentation and which ones are immune to it (Section 4.2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Upward", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Monotonicity reasoning is a sort of reasoning based on word replacement. Based on the monotonicity properties of words, it determines whether a certain word replacement results in a sentence entailed from the original one (van Benthem, 1983; Icard and Moss, 2014) . A polarity is a characteristic of a word position imposed by monotone operators. Replacements with more general (or specific) phrases in \u2191 (or \u2193) polarity positions license entailment. Polarities are determined by a function which is always upward monotone (+) (i.e., an order preserving function that licenses entailment from specific to general phrases), always downward monotone (\u2212) (i.e., an order reversing function) or neither, non-monotone.", |
|
"cite_spans": [ |
|
{ |
|
"start": 222, |
|
"end": 241, |
|
"text": "(van Benthem, 1983;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 242, |
|
"end": 263, |
|
"text": "Icard and Moss, 2014)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monotonicity Reasoning", |
|
"sec_num": "2" |
|
}, |
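To pin these definitions down, here is a compact restatement in standard notation from the monotonicity literature (our paraphrase, not a formula taken from the paper; \sqsubseteq denotes the inclusion order on phrases):

```latex
% f is upward monotone (+) iff it preserves the inclusion order,
% downward monotone (-) iff it reverses it; otherwise f is non-monotone.
\[
x \sqsubseteq y \;\Longrightarrow\; f(x) \sqsubseteq f(y)
\qquad \text{(upward monotone, $+$)}
\]
\[
x \sqsubseteq y \;\Longrightarrow\; f(y) \sqsubseteq f(x)
\qquad \text{(downward monotone, $-$)}
\]
```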
|
{ |
|
"text": "Determiners are modeled as binary operators, taking noun and verb phrases as the first and second arguments, respectively, and they entail sentences with their arguments narrowed or broadened according to their monotonicity properties. For example, the determiner some is upward monotone both in its first and second arguments, and the concepts can be broadened by replacing its hypernym (people boy), removing modifiers (dancing happily dancing), or adding disjunction. The concepts can be narrowed by replacing its hyponym (schoolboy boy), adding modifiers, or adding conjunction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monotonicity Reasoning", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "( 1) If a sentence contains negation, the polarity of words over the scope of negation is reversed:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monotonicity Reasoning", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "(2) No [ NP boys\u2193] \u2212 [ VP are happily dancing\u2193] \u2212 No [ NP one] [ VP is dancing] \u21d2 No [ NP schoolboys] [ VP are dancing and singing]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monotonicity Reasoning", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "If the propositional object is embedded in another negative or conditional context, the polarity of words over its scope can be reversed again:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monotonicity Reasoning", |
|
"sec_num": "2" |
|
}, |
|
{

"text": "(3) If [there are no [ NP boys\u2191] \u2212 [ VP dancing happily\u2191] \u2212 ] \u2212 , [the party might be canceled] + \u21d2 If [there is no [ NP one] [ VP dancing]], [the party might be canceled]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Monotonicity Reasoning",

"sec_num": "2"

},
|
{ |
|
"text": "In this way, the polarity of words is determined by monotonicity operators and syntactic structures.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monotonicity Reasoning", |
|
"sec_num": "2" |
|
}, |
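To make the polarity mechanism concrete, here is a minimal Python sketch, not the authors' implementation (which derives polarities from CCG analyses): it propagates monotonicity signs down a toy operator tree, where the polarity of a position is the product of the signs on the path from the root. The operator signatures and the `Node`/`polarities` names are illustrative assumptions.

```python
# Illustrative sketch, not the authors' CCG-based implementation.
from dataclasses import dataclass
from typing import List, Tuple

# Hypothetical operator signatures: one sign per argument position.
# +1 = upward monotone, -1 = downward monotone, 0 = non-monotone.
SIGNATURES = {
    "some":  (+1, +1),  # upward in both its NP and VP arguments
    "every": (-1, +1),  # downward in NP, upward in VP
    "no":    (-1, -1),  # downward in both arguments
    "if":    (-1, +1),  # downward in the antecedent, upward in the consequent
}

@dataclass
class Node:
    op: str               # operator word ("" for a leaf)
    args: List["Node"]    # argument subtrees (empty for a leaf)
    phrase: str = ""      # the phrase at a leaf position

def polarities(node: Node, sign: int = 1) -> List[Tuple[str, str]]:
    """Return (phrase, polarity) pairs for every leaf position under node."""
    if not node.args:  # leaf: a replaceable phrase
        return [(node.phrase, {1: "up", -1: "down", 0: "non"}[sign])]
    pairs = []
    for arg, arg_sign in zip(node.args, SIGNATURES[node.op]):
        # Compose signs; anything below a non-monotone position stays non.
        pairs.extend(polarities(arg, 0 if 0 in (sign, arg_sign) else sign * arg_sign))
    return pairs

# Example (3): "If there are no boys dancing happily, the party might be canceled."
sent = Node("if", [
    Node("no", [Node("", [], "boys"), Node("", [], "dancing happily")]),
    Node("", [], "the party might be canceled"),
])
print(polarities(sent))
# [('boys', 'up'), ('dancing happily', 'up'),
#  ('the party might be canceled', 'up')]
# The two downward flips (if-antecedent, no) cancel out, matching the
# upward marks in example (3).
```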
|
{ |
|
"text": "We address three issues when creating the inference problems: (a) Detect the monotone operators and their arguments; (b) Based on the syntactic structure, induce the polarity of the argument positions; (c) Using lexical knowledge or logical connectives, narrow or broaden the arguments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Creation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use sentences from the Parallel Meaning Bank (PMB, Abzianidze et al., 2017) as a source while creating the inference dataset. The reason behind choosing the PMB is threefold. First, the finegrained annotations in the PMB facilitate our automatic monotonicity-driven construction of inference problems. In particular, semantic tokenization and WordNet (Fellbaum, 1998) senses make narrow and broad concept substitutions easy while the syntactic analyses in Combinatory Categorial Grammar (CCG, Steedman, 2000) format and semantic tags (Abzianidze and Bos, 2017) contribute to monotonicity and polarity detection. Second, the PMB contains lexically and syntactically diverse texts from a wide range of genres. Third, the gold (silver) documents are fully (partially) manually verified, which control noise in the automated generated dataset. To prevent easy inferences, we use the sentences with more than five tokens from 5K gold and 5K silver portions of the PMB.", |
|
"cite_spans": [ |
|
{ |
|
"start": 354, |
|
"end": 370, |
|
"text": "(Fellbaum, 1998)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 511, |
|
"text": "Steedman, 2000)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Source corpus", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Step 1. Select a sentence using semantic tags from the PMB All kids were dancing on the floor AND CON PST EXG REL DEF CON", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Source corpus", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Step 2. Detect the polarity of constituents via CCG analysis Figure 1 : Illustration for creating the HELP dataset. Figure 1 illustrates the method of creating the HELP dataset. We use declarative sentences from the PMB containing monotone operators, conjunction, or disjunction as a source (Step 1). These target words can be identified by their semantic tags: AND (all, every, each, and), DIS (some, several, or), NEG (no, not, neither, without), DEF (both), QUV (many, few), and IMP (if, when, unless). In", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 69, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 124, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Source corpus", |
|
"sec_num": "3.1" |
|
}, |
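As a rough illustration of Step 1, the sketch below keeps sentences that are long enough and contain at least one target semantic tag. The (token, tag) input format and the `is_candidate` helper are assumptions made for illustration; they are not the PMB API.

```python
# Sketch of the Step 1 filter over semantically tagged PMB sentences.
# Semantic tags that signal a monotone operator, conjunction, or disjunction:
TARGET_TAGS = {"AND", "DIS", "NEG", "DEF", "QUV", "IMP"}
MIN_TOKENS = 6  # keep sentences with more than five tokens

def is_candidate(tagged_sentence):
    """tagged_sentence: list of (token, semantic_tag) pairs for one sentence."""
    if len(tagged_sentence) < MIN_TOKENS:
        return False
    # NB: a real filter would also check the token itself (e.g., the DEF tag
    # should match "both", not the definite article "the").
    return any(tag in TARGET_TAGS for _, tag in tagged_sentence)

# The Figure 1 sentence with its (simplified) tags:
example = [("All", "AND"), ("kids", "CON"), ("were", "PST"),
           ("dancing", "EXG"), ("on", "REL"), ("the", "DEF"), ("floor", "CON")]
print(is_candidate(example))  # True: 7 tokens and the AND tag for "All"
```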
|
{ |
|
"text": "Step 2, after locating the first (NP) and the second (VP) arguments of the monotone operator via a CCG derivation, we detect their polarities with the possibility of reversing a polarity if an argument appears in a downward environment. In", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Step 3, to broaden or narrow the first and the second arguments, we consider two types of operations: (i) lexical replacement, i.e., substituting the argument with its hypernym/hyponym (e.g., H 1 ) and (ii) syntactic elimination, i.e., dropping a modifier or a conjunction/disjunction phrase in the argument (e.g., H 2 ). Given the polarity of the argument position (\u2191 or \u2193) and the type of replacement (with more general or specific phrases), the gold label (entailment or neutral) of a premisehypothesis pair is automatically determined; e.g., both (P, H 1 ) and (P, H 2 ) in Step 3 are assigned entailment. For lexical replacements, we use WordNet senses from the PMB and their ISA relations with the same part-of-speech to control naturalness of the obtained sentence. To compensate missing word senses from the silver documents, we use the Lesk algorithm (Lesk, 1986) . In Step 4, by swapping the premise and the hypothesis, we create another inference pair and assign its gold label; e.g., (P 1 , H ) and (P 2 , H ) are created and assigned neutral. By swapping a sentence pair created by syntactic elimination, we can create a pair such as (P 2 , H ) in which the hypothesis is more specific than the premise.", |
|
"cite_spans": [ |
|
{ |
|
"start": 860, |
|
"end": 872, |
|
"text": "(Lesk, 1986)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3.2" |
|
}, |
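The following sketch illustrates the lexical-replacement branch of Steps 3 and 4 for a single-word argument, using NLTK's WordNet interface and its Lesk implementation as a stand-in for the PMB's gold senses; the `replacements`/`gold_label` helpers and the simplified label logic are assumptions, not the paper's code.

```python
# Rough sketch of Steps 3-4 for one word, assuming NLTK data is installed
# (nltk.download("wordnet")). The real pipeline works over PMB word senses
# and CCG spans and falls back to Lesk only for silver documents.
from nltk.corpus import wordnet as wn
from nltk.wsd import lesk

def replacements(word, sentence_tokens, pos=wn.NOUN):
    """Yield (new_word, direction): 'broader' hypernyms, 'narrower' hyponyms."""
    synset = lesk(sentence_tokens, word, pos) or (wn.synsets(word, pos) or [None])[0]
    if synset is None:
        return
    for hyper in synset.hypernyms():
        for lemma in hyper.lemma_names():
            yield lemma.replace("_", " "), "broader"
    for hypo in synset.hyponyms():
        for lemma in hypo.lemma_names():
            yield lemma.replace("_", " "), "narrower"

def gold_label(polarity, direction):
    """Label of the premise -> hypothesis pair produced by one replacement.

    Step 4 then swaps premise and hypothesis, turning an entailment pair
    into a neutral one.
    """
    if (polarity, direction) in {("up", "broader"), ("down", "narrower")}:
        return "entailment"
    return "neutral"

tokens = "All kids were dancing on the floor".split()
for new_word, direction in replacements("kids", tokens):
    # "kids" sits in a downward position under "All", so narrowing entails.
    print(new_word, direction, gold_label("down", direction))
```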
|
{ |
|
"text": "The resulting dataset has 36K inference pairs consisting of upward monotone, downward monotone, non-monotone, conjunction, and disjunction. Table 2 shows some examples. The number of vocabulary items is 15K. We manually checked the naturalness of randomly sampled 500 sentence pairs, of which 146 pairs were unnatural. As mentioned in previous work (Glockner et al., 2018) , there are some cases where WordNet for substitution leads to unnatural sentences due to the context mismatch; e.g., an example such as P: You have no driving happening \u21d2 H: You have no driving experience, where P is obtained from H by replacing experience by its hypernym happening. Since our intention is to explore possible ways to augment training data for monotonicity reasoning, we include these cases in the training dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 349, |
|
"end": 372, |
|
"text": "(Glockner et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 147, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The HELP dataset", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We use HELP as additional training material for three neural models for NLI and evaluate them on test sets dealing with monotonicity reasoning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Models We used three models: BERT (Devlin et al., 2019) , BiLSTM+ELMo+Attn (Wang et al., 2019) , and ESIM (Chen et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 55, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 75, |
|
"end": 94, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 106, |
|
"end": 125, |
|
"text": "(Chen et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental settings", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Training data We used three different training sets and compared their performance; MultiNLI (392K), MultiNLI+MQ (the dataset for multiple quantifiers introduced in Section 1; Geiger et al., 2018) (892K), and MultiNLI+HELP (429K).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental settings", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We used four test sets: (i) the GLUE diagnostic dataset (Wang et al., 2019) conjunction, and disjunction sections), (ii) Fra-CaS (the generalized quantifier section), (iii) the SICK (Marelli et al., 2014) test set, and (iv) MultiNLI matched/mismatched test set. We used the Matthews correlation coefficient (ranging [\u22121, 1]) as the evaluation metric for GLUE. Regarding other datasets, we used accuracy as the metric. We also check if our data augmentation does not decrease the performance on MultiNLI. Table 3 shows that adding HELP to MultiNLI improved the accuracy of all models on GLUE, Fra-CaS, and SICK. Regarding MultiNLI, note that adding data for downward inference can be harmful for performing upward inference, because lexical replacements work in an opposite way in downward environments. However, our data augmentation minimized the decrease in performance on MultiNLI. This suggests that models managed to learn the relationships between downward operators and their arguments from HELP.", |
|
"cite_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 75, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 204, |
|
"text": "(Marelli et al., 2014)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 504, |
|
"end": 511, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Test data", |
|
"sec_num": null |
|
}, |
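For concreteness, both evaluation metrics are available in scikit-learn; the label lists below are toy placeholders, not the paper's predictions.

```python
# Standard scikit-learn implementations of the two metrics used here.
from sklearn.metrics import accuracy_score, matthews_corrcoef

gold = ["entailment", "neutral", "entailment", "neutral"]
pred = ["entailment", "neutral", "neutral", "neutral"]

# Matthews correlation coefficient, ranging over [-1, 1]: the metric for
# the GLUE diagnostic set.
print(matthews_corrcoef(gold, pred))
# Plain accuracy: the metric for FraCaS, SICK, and MultiNLI.
print(accuracy_score(gold, pred))
```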
|
{ |
|
"text": "The improvement in accuracy is better with HELP than that with MQ despite the fact that the size of HELP is much smaller than MQ. MQ does not deal with lexical replacements, and thus the improvement is not stable. This indicates that the improvement comes from carefully controlling the target reasoning of the training set rather than from its size. ESIM showed a greater improvement in accuracy compared with the other models when we added HELP. This result arguably supports the finding in Bowman et al. (2015b) that a tree architecture is better for learning some logical inferences. Regarding the evaluation on SICK, Talman and Chatzikyriakidis (2018) reported a drop in accuracy of 40-50% when BiLSTM and ESIM were trained on MultiNLI because SICK is out of the domain of MultiNLI. Indeed, the accuracy of each model, including BERT, was low at 40-60%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and discussion", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "When compared among linguistic phenomena, the improvement by adding HELP was better for upward and downward monotone. In particular, all models except models trained with HELP failed to answer 68 problems for monotonicity inferences with lexical replacements. This indicates that such inferences can be improved by adding HELP. The improvement for disjunction was smaller than other phenomena. To investigate this, we conducted error analysis on 68 problems of GLUE and FraCaS, which all the models misclassified. 44 problems are neutral problems in which all words in the hypothesis occur in the premise (e.g.,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and discussion", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "He is in London). 13 problems are entailment problems in which the hypothesis contains a word or a phrase not occurring in the premise (e.g., I don't want to have to keep entertaining people \u21d2 I don't want to have to keep entertaining people who don't value my time). These problems contain disjunction or modifiers in downward environments where either (i) the premise P contains all words in the hypothesis H yet the inference is invalid or (ii) H contains more words than those in P yet the inference is valid. 2 Although HELP contains 21K such problems, the models nevertheless misclassified them. This indicates that the difficulty in learning these non-lexical downward inferences might not come from the lack of training datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "He is either in London or in Paris", |
|
"sec_num": null |
|
}, |
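A minimal sketch of the lexical-overlap pattern behind these two error classes, using the quoted examples; `hypothesis_words_covered` is an illustrative helper, not part of the paper's code.

```python
# The two failure patterns from the error analysis: full lexical overlap does
# not guarantee entailment, and extra hypothesis words do not rule it out.
def hypothesis_words_covered(premise: str, hypothesis: str) -> bool:
    """True if every hypothesis word also occurs in the premise."""
    return set(hypothesis.lower().split()) <= set(premise.lower().split())

# (i) Full overlap, yet the gold label is neutral:
print(hypothesis_words_covered("He is either in London or in Paris",
                               "He is in London"))  # True
# (ii) Extra words in the hypothesis, yet the gold label is entailment:
print(hypothesis_words_covered(
    "I don't want to have to keep entertaining people",
    "I don't want to have to keep entertaining people who don't value my time",
))  # False
```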
|
{ |
|
"text": "We introduced a monotonicity-driven NLI data augmentation method. The experiments showed that neural models trained on HELP obtained the higher overall accuracy. However, the improvement tended to be small on downward monotone inferences with disjunction and modification, which suggests that some types of inferences can be improved by adding data while others might require different kind of help.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For future work, our data augmentation can be used for multilingual corpora. Since the PMB annotations sufficed for creating HELP, applying our method to the non-English PMB documents seems straightforward. Additionally, it is interesting to verify the quality and contribution of a dataset which will be created by using our method on an automatically annotated and parsed corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our dataset and its generation code will be made publicly available at https://github.com/verypluming/HELP.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Interestingly, certain logical inferences including disjunction and downward monotonicity are difficult also for humans to get(Geurts and van der Slik, 2005).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank our three anonymous reviewers for helpful suggestions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The Parallel Meaning Bank: Towards a multilingual corpus of translations annotated with compositional meaning representations", |
|
"authors": [ |
|
{ |
|
"first": "Lasha", |
|
"middle": [], |
|
"last": "Abzianidze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Bjerva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian", |
|
"middle": [], |
|
"last": "Evang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hessel", |
|
"middle": [], |
|
"last": "Haagsma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rik", |
|
"middle": [], |
|
"last": "Van Noord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Ludmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duc-Duy", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Bos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "242--247", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lasha Abzianidze, Johannes Bjerva, Kilian Evang, Hessel Haagsma, Rik van Noord, Pierre Ludmann, Duc-Duy Nguyen, and Johan Bos. 2017. The Paral- lel Meaning Bank: Towards a multilingual corpus of translations annotated with compositional meaning representations. In Proceedings of the 15th Confer- ence of the European Chapter of the Association for Computational Linguistics, pages 242-247.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Towards universal semantic tagging", |
|
"authors": [ |
|
{ |
|
"first": "Lasha", |
|
"middle": [], |
|
"last": "Abzianidze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Bos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 12th International Conference on Computational Semantics (IWCS 2017)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lasha Abzianidze and Johan Bos. 2017. Towards uni- versal semantic tagging. In Proceedings of the 12th International Conference on Computational Seman- tics (IWCS 2017), pages 1-6.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A large annotated corpus for learning natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Potts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "632--642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015a. A large anno- tated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 632-642.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Recursive neural networks can learn logical semantics", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Potts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 3rd Workshop on Continuous Vector Space Models and their Compositionality", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12--21", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Christopher Potts, and Christo- pher D. Manning. 2015b. Recursive neural networks can learn logical semantics. In Proceedings of the 3rd Workshop on Continuous Vector Space Models and their Compositionality, pages 12-21.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Enhanced lstm for natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Qian", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhen-Hua", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Si", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Inkpen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1657--1668", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. 2017. Enhanced lstm for natural language inference. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics, pages 1657-1668.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "FraCaS-a framework for computational semantics", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Cooper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Crouch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Van Eijck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Fox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Van Genabith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Jaspers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hans", |
|
"middle": [], |
|
"last": "Kamp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Pulman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Cooper, Richard Crouch, Jan van Eijck, Chris Fox, Josef van Genabith, Jan Jaspers, Hans Kamp, Manfred Pinkal, Massimo Poesio, Stephen Pulman, et al. 1994. FraCaS-a framework for computational semantics. Deliverable, D6.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Recognizing Textual Entailment: Models and Applications. Synthesis Lectures on Human Language Technologies", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Ido Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [ |
|
"Massimo" |
|
], |
|
"last": "Sammons", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zanzotto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan, Dan Roth, Mark Sammons, and Fabio Mas- simo Zanzotto. 2013. Recognizing Textual Entail- ment: Models and Applications. Synthesis Lectures on Human Language Technologies. Morgan & Clay- pool Publishers.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "WordNet: An Electronic Lexical Database. Language, Speech, and Communication", |
|
"authors": [ |
|
{ |
|
"first": "Christiane", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christiane Fellbaum. 1998. WordNet: An Electronic Lexical Database. Language, Speech, and Commu- nication. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Stress-testing neural models of natural language inference with multiplyquantified sentences", |
|
"authors": [ |
|
{ |
|
"first": "Atticus", |
|
"middle": [], |
|
"last": "Geiger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ignacio", |
|
"middle": [], |
|
"last": "Cases", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lauri", |
|
"middle": [], |
|
"last": "Karttunen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atticus Geiger, Ignacio Cases, Lauri Karttunen, and Christopher Potts. 2018. Stress-testing neural mod- els of natural language inference with multiply- quantified sentences. CoRR, abs/1810.13033.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Monotonicity and processing load", |
|
"authors": [ |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Geurts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frans", |
|
"middle": [], |
|
"last": "Van Der Slik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Journal of Semantics", |
|
"volume": "22", |
|
"issue": "1", |
|
"pages": "97--117", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bart Geurts and Frans van der Slik. 2005. Mono- tonicity and processing load. Journal of Semantics, 22(1):97-117.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Breaking NLI systems with sentences that require simple lexical inferences", |
|
"authors": [ |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Glockner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vered", |
|
"middle": [], |
|
"last": "Shwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "650--655", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Max Glockner, Vered Shwartz, and Yoav Goldberg. 2018. Breaking NLI systems with sentences that re- quire simple lexical inferences. In Proceedings of the 56th Annual Meeting of the Association for Com- putational Linguistics, pages 650-655.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Annotation artifacts in natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Suchin Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "107--112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. 2018. Annotation artifacts in natural lan- guage inference data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 107-112.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Recent progress in monotonicity", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Icard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Moss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "LILT", |
|
"issue": "", |
|
"pages": "167--194", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Icard and Lawrence Moss. 2014. Recent progress in monotonicity. LILT, 9(7):167-194.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Automatic sense disambiguation using machine readable dictionaries: How to tell a pine cone from an ice cream cone", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Lesk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1986, |
|
"venue": "Proceedings of the 5th Annual International Conference on Systems Documentation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "24--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Lesk. 1986. Automatic sense disambiguation using machine readable dictionaries: How to tell a pine cone from an ice cream cone. In Proceedings of the 5th Annual International Conference on Systems Documentation, pages 24-26.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A SICK cure for the evaluation of compositional distributional semantic models", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Marelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Menini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raffaella", |
|
"middle": [], |
|
"last": "Bernardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Zamparelli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 9th International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "216--223", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Marelli, Stefano Menini, Marco Baroni, Luisa Bentivogli, Raffaella Bernardi, and Roberto Zam- parelli. 2014. A SICK cure for the evaluation of compositional distributional semantic models. In Proceedings of the 9th International Conference on Language Resources and Evaluation, pages 216- 223.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Lightweight entailment checking for computational semantics", |
|
"authors": [ |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Maarten De Rijke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 3rd International Workshop on Inference in Computational Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christof Monz and Maarten de Rijke. 2001. Light- weight entailment checking for computational se- mantics. In Proceedings of the 3rd International Workshop on Inference in Computational Semantics, pages 1-15.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Stress test evaluation for natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Aakanksha", |
|
"middle": [], |
|
"last": "Naik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhilasha", |
|
"middle": [], |
|
"last": "Ravichander", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Norman", |
|
"middle": [], |
|
"last": "Sadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolyn", |
|
"middle": [], |
|
"last": "Rose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2340--2353", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aakanksha Naik, Abhilasha Ravichander, Norman Sadeh, Carolyn Rose, and Graham Neubig. 2018. Stress test evaluation for natural language inference. In Proceedings of the 27th International Conference on Computational Linguistics, pages 2340-2353.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Collecting diverse natural language inference problems for sentence representation evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Poliak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aparajita", |
|
"middle": [], |
|
"last": "Haldar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"Edward" |
|
], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [ |
|
"Steven" |
|
], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "337--340", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Poliak, Aparajita Haldar, Rachel Rudinger, J. Edward Hu, Ellie Pavlick, Aaron Steven White, and Benjamin Van Durme. 2018a. Collecting di- verse natural language inference problems for sen- tence representation evaluation. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Ana- lyzing and Interpreting Neural Networks for NLP, pages 337-340.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Hypothesis only baselines in natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Poliak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aparajita", |
|
"middle": [], |
|
"last": "Haldar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "180--191", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Poliak, Jason Naradowsky, Aparajita Haldar, Rachel Rudinger, and Benjamin Van Durme. 2018b. Hypothesis only baselines in natural language in- ference. In Proceedings of the Seventh Joint Con- ference on Lexical and Computational Semantics, pages 180-191.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The Syntactic Process", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Steedman. 2000. The Syntactic Process. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Testing the generalization power of neural network models across NLI benchmarks", |
|
"authors": [ |
|
{ |
|
"first": "Aarne", |
|
"middle": [], |
|
"last": "Talman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stergios", |
|
"middle": [], |
|
"last": "Chatzikyriakidis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aarne Talman and Stergios Chatzikyriakidis. 2018. Testing the generalization power of neural net- work models across NLI benchmarks. CoRR, abs/1810.09774.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Performance impact caused by hidden bias of training data for recognizing textual entailment", |
|
"authors": [ |
|
{ |
|
"first": "Masatoshi", |
|
"middle": [], |
|
"last": "Tsuchiya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 11th International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masatoshi Tsuchiya. 2018. Performance impact caused by hidden bias of training data for recogniz- ing textual entailment. In Proceedings of the 11th International Conference on Language Resources and Evaluation.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In Inter- national Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1112--1122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North Ameri- can Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1112-1122.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Upward and downward inferences." |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Some [ NP boys\u2191] + [ VP are happily dancing\u2191] + \u21d2 Some [ NP people] [ VP are dancing] Some [ NP schoolboys] [ VP are dancing and singing]" |
|
}, |
|
"TABREF2": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "All [ NP kids\u2193] were [ VP dancing on the floor\u2191] Step 3. Replace expressions based on monotonicity P : All [ NP kids\u2193] [ VP were dancing on the floor\u2191] H1: All [ NP foster children] [ VP were dancing on the floor] ENTAIL H2: All [ NP kids] [ VP were dancing]" |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td colspan=\"2\">Section Size</td><td>Example</td></tr><tr><td colspan=\"3\">Up Tom bought some Non 7784 Shakespeare wrote both tragedy and comedy* 1105 Shakespeare wrote both tragedy and drama</td></tr><tr><td>Conj</td><td>6076</td><td>Tom removed his glasses Tom removed his glasses and rubbed his eyes*</td></tr><tr><td>Disj</td><td>438</td><td>The trees are barren \u21d2The trees are barren or bear only small fruit*</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Mexican sunflowers for Mary \u21d2Tom bought some flowers for Mary* Down 21192 If there's no water, there's no whisky* \u21d2If there's no facility, there's no whisky" |
|
}, |
|
"TABREF4": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Examples in HELP. The sentence with an asterisk is the original sentence from the PMB." |
|
}, |
|
"TABREF5": { |
|
"content": "<table><tr><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">GLUE diagnostic</td><td/><td/><td/><td>FraCaS</td><td>SICK</td><td>MNLI</td></tr><tr><td>Model</td><td>Train Data</td><td colspan=\"2\">Up (30)</td><td colspan=\"2\">Down (30)</td><td colspan=\"2\">Non (22)</td><td colspan=\"2\">Conj (32)</td><td>Disj (38)</td><td/><td>Total (152)</td><td>(80)</td><td>(4927)</td><td>match (10000)</td><td>mismatch (10000)</td></tr><tr><td/><td>MNLI</td><td>50.4</td><td/><td>-67.5</td><td/><td>23.1</td><td/><td>52.5</td><td/><td>-6.1</td><td/><td>17.8</td><td>65.0</td><td>55.4</td><td>84.6</td><td>83.4</td></tr><tr><td>BERT</td><td>+MQ</td><td>59.6</td><td colspan=\"4\">+9.2 -49.3 +18.2 14.0</td><td colspan=\"2\">-9.1 62.1</td><td colspan=\"4\">+9.6 -18.8 -12.7 26.3</td><td>+8.5 68.8 +3.8 58.2</td><td>+2.8 78.4 -6.2 78.6 -4.8</td></tr><tr><td/><td colspan=\"12\">+HELP 67.0 +4.6 84.4 -0.2 83.1 -0.3</td></tr><tr><td colspan=\"2\">BiLSTM MNLI</td><td>22.2</td><td/><td>-9.4</td><td/><td>-2.7</td><td/><td>42.4</td><td/><td>-9.9</td><td/><td>-3.5</td><td>68.9</td><td>53.8</td><td>76.4</td><td>76.1</td></tr><tr><td>+ELMo</td><td>+MQ</td><td>22.2</td><td>0.0</td><td colspan=\"3\">8.1 +17.5 -5.7</td><td colspan=\"2\">-3.0 42.4</td><td>0.0</td><td>-9.8</td><td>+0.1</td><td>5.7</td><td>+9.2 65.9 -3.0 54.0</td><td>+0.2 71.4 -5.0 70.7 -5.4</td></tr><tr><td>+Attn</td><td colspan=\"5\">+HELP 32.4 +10.2 22.9 +32.3</td><td>3.7</td><td colspan=\"2\">+6.4 45.6</td><td>+3.2</td><td>-9.9</td><td colspan=\"2\">0.0 17.0 +20.5 71.3 +2.4 54.0</td><td>+0.2 75.2 -1.2 74.1 -2.0</td></tr><tr><td/><td>MNLI</td><td>14.9</td><td/><td>-14.0</td><td/><td>6.0</td><td/><td>29.8</td><td/><td>-3.6</td><td/><td>1.1</td><td>47.5</td><td>43.9</td><td>71.3</td><td>70.7</td></tr><tr><td>ESIM</td><td>+MQ</td><td colspan=\"2\">27.2 +12.3</td><td>-7.8</td><td>+6.2</td><td>3.4</td><td>-2.6</td><td colspan=\"3\">5.2 -24.6 -13.0</td><td>-9.4</td><td>6.8</td><td>+5.7 43.7 -3.8 53.1</td><td>+9.2 68.6 -3.7 68.2 -2.5</td></tr><tr><td/><td colspan=\"5\">+HELP 31.4 +16.5 24.7 +38.7</td><td>8.0</td><td colspan=\"2\">+2.0 32.6</td><td>+2.8</td><td colspan=\"3\">7.1 +10.7 27.0 +25.9 48.8 +1.3 56.6 +12.7 71.1 -0.2 70.1 -0.6</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>(upward</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"3\">monotone, downward monotone, non-monotone,</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "+16.6 29.8 +97.3 47.9 +24.8 72.1 +19.6 -4.1 +2.0 51.2 +33.4 68.8 +3.8 60.0" |
|
}, |
|
"TABREF6": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Evaluation results on the GLUE diagnostic dataset, FraCaS, SICK, and MultiNLI (MNLI). The number in parentheses is the number of problems in each test set. is the difference from the model trained on MNLI." |
|
} |
|
} |
|
} |
|
} |