|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:10:28.711602Z" |
|
}, |
|
"title": "Human-Model Divergence in the Handling of Vagueness", |
|
"authors": [ |
|
{
"first": "Elias",
"middle": [],
"last": "Stengel-Eskin",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Johns Hopkins University",
"location": {}
},
"email": ""
},
{
"first": "Jimena",
"middle": [],
"last": "Guallar-Blasco",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Johns Hopkins University",
"location": {}
},
"email": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Van Durme",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Johns Hopkins University",
"location": {}
},
"email": "[email protected]"
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "While aggregate performance metrics can generate valuable insights at a large scale, their dominance means more complex and nuanced language phenomena, such as vagueness, may be overlooked. Focusing on vague terms (e.g. sunny, cloudy, young, etc.) we inspect the behavior of visually grounded and text-only models, finding systematic divergences from human judgments even when a model's overall performance is high. To help explain this disparity, we identify two assumptions made by the datasets and models examined and, guided by the philosophy of vagueness, isolate cases where they do not hold.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "While aggregate performance metrics can generate valuable insights at a large scale, their dominance means more complex and nuanced language phenomena, such as vagueness, may be overlooked. Focusing on vague terms (e.g. sunny, cloudy, young, etc.) we inspect the behavior of visually grounded and text-only models, finding systematic divergences from human judgments even when a model's overall performance is high. To help explain this disparity, we identify two assumptions made by the datasets and models examined and, guided by the philosophy of vagueness, isolate cases where they do not hold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Part of the power of language as a medium for communication is rooted in having a reliable mapping between language and the world: we typically expect language to be used in a consistent fashion, i.e. the word \"dog\" refers to a relatively invariant group of animals, and not to a different set of items each time we use it. This view of language dovetails with the supervised learning paradigm, where we assume that an approximation of such a mapping can be learned from labeled examples-often collected via manual annotation by crowdworkers. In natural language processing (NLP), this learning typically takes place by treating tasks as classification problems which optimize for log-likelihood. While this paradigm has been extensively and successfully applied in NLP, it is not without both practical and theoretical shortcomings. Guided by notions from the philosophy of language, we propose that borderline cases of vague terms, where the mapping between inputs and outputs is unclear, represent an edge case for the assumptions made by the supervised paradigm, and result in systematic divergences between human and model behavior.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\"Is the sky cloudy?\" \"Is the sky cloudy?\" \"Is it cloudy?\" Figure 1 : Given a binary question involving a vague term (in this case, cloudy) humans hedge between \"yes\" and \"no,\" following a sigmoid curve with borderline examples falling in the middle. Standard error (grey band) shows that annotator agree even in borderline regions. In contrast, model predictions remain at extreme ends.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 66, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To demonstrate this, we begin by identifying a set of canonically vague terms in the binary question subset of the Visual Question Answering (VQA) and GQA datasets (Antol et al., 2015; Goyal et al., 2017; Hudson and Manning, 2019) and isolating a subset of images, questions, and answers from these datasets centered around these terms. Using this subset, we show that while the accuracy of LXMERT (Tan and Bansal, 2019) on non-borderline cases is very high, its performance drops-sometimes dramatically-on borderline cases. We then compare the behavior of the model against that of human annotators, finding that while humans display behavior which aligns with theories of meaning for vague terms, model behavior is less predictable.", |
|
"cite_spans": [ |
|
{ |
|
"start": 164, |
|
"end": 184, |
|
"text": "(Antol et al., 2015;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 185, |
|
"end": 204, |
|
"text": "Goyal et al., 2017;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 230, |
|
"text": "Hudson and Manning, 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 420, |
|
"text": "(Tan and Bansal, 2019)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We extend our analysis of visually-grounded terms to a text-only case, re-framing the catego-rization of statements into true statements and false ones as a task involving vagueness. Controlling for world knowledge, we find that while probes over contextualized encoders can classify statements significantly better than random, their output distributions are strikingly similar to those observed in the visually-grounded case. When contrasted with scalar annotations collected from crowdworkers, these results support the notion that analytic truth itself admits of borderline cases and poses problems for supervised systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In \u00a7 2, we provide a more thorough definition of terms used, the motivation for exploring vagueness, and the underlying assumptions of supervised learning that are violated by vague terms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Vague terms, broadly speaking, are ones that admit of borderline cases; for example: cloudy is vague because, while there are clearly cloudy and not cloudy days, there are also cases where the best response to the question \"is it cloudy?\" might be \"somewhat\" rather than a definitive \"yes\" or \"no.\" Given this definition, we can see that a large portion of the predicates we use in every-day speech are vague. This even encompasses predicates such as is true and is false, as we might have statements that are true or false to varying degrees.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation and Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Vague predicates in particular have been a focus of the philosophy of language, as they represent an interesting edge case for theories of meaning. Take, for example, a canonical example of a vague predicate from philosophy: is a heap. There are things that are undeniable heaps, and others that are clearly not. In the extreme case, we can imagine starting with a heap of sand (say, N grains) and removing a single grain of sand from it. Clearly, the resulting mass would still be a heap. This is, however, a dangerous precedent; we can now remove N \u2212 2 grains on sand until we have a single grain remaining, whose heap-ness is hard to justify, but which, by induction, is still a heap. This raises important questions: how is it that speakers avoid this paradox and are able to use and understand vague terms, even in borderline cases? Is there a definitive point at which a heap becomes a non-heap? The answers to these questions should influence how we annotate the data from which we aim to learn meaning representations of vague terms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation and Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "While the unequivocal instances of heaps fit well into the current paradigm of supervised learning with categorical labels, borderline heaps do present a problem. Recall that the first assumption by supervised learning which we have pointed out is that the ideal mapping between the input (in this case, questions and images) and the the label set (answers) is largely fixed. For example, given the question \"Is this a dog?\" we assume that the set of things in the world which we call \"dog\", also known as the extension of \"dog\", remains constant. In that case, the annotator's response to the question corresponds to whether what the image depicts could be plausibly considered as part of the extension of \"dog.\" While we might easily be able to determine the set membership of poodles and terriers, we may have a harder time with Jack London's White Fang: half wolf, half dog. Thus it is clear that the borderline cases of vague terms demand a more nuanced account than merely a forced choice between two extremes. The range of such accounts fall broadly into three classes:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation and Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Contextualist theories (Kamp, 1981; Raffman, 1994; Graff, 2000; Shapiro, 2006, i.a.) broadly hold that the interpretation of vague predicates depend on contextual and pragmatic information such as on the speaker's previous commitments, their perceived goals, and the psychological state of the interpreter. This view could in most cases be reconciled with the supervised learning paradigm, provided that the data upon which the interpretation of the vague predicate hinges (i.e. speaker commitments, etc.) is available as input. Past work in modeling the meaning of vague terms has often focused on these accounts (c.f. \u00a7 6).", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 35, |
|
"text": "(Kamp, 1981;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 36, |
|
"end": 50, |
|
"text": "Raffman, 1994;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 51, |
|
"end": 63, |
|
"text": "Graff, 2000;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 64, |
|
"end": 84, |
|
"text": "Shapiro, 2006, i.a.)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation and Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Epistemic accounts (Sorensen, 2001; Williamson, 1994, i.a.) bite the proverbial bullet, allowing for a hard boundary between heaps and non-heaps to exist, but claiming that its location is unknowable. This is in contrast to the supervised paradigm, where the boundary is treated as known.", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 35, |
|
"text": "(Sorensen, 2001;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 36, |
|
"end": 59, |
|
"text": "Williamson, 1994, i.a.)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation and Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Logic-based approaches tackle the paradox induced by vagueness, either by claiming that borderline examples do not admit of truth values (supervaluationism), or by adapting logic to permit more granular classifications (many-valued logic; Sorensen, 2018). The latter approach can sometimes accommodate the supervised paradigm. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation and Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Ambiguity and Under-specification It is important to distinguish vagueness from underspecification (imprecision in the input making the output difficult to recover) and ambiguity (the presence of multiple valid answers), both alternative explanations for annotator disagreement. Indeed, Bhattacharya et al. (2019) include both in their taxonomy of VQA images-question pairs with high annotator disagreement. While they are major challenges in any language-based task, both are often defeasible in nature: we can provide additional information that would reveal the \"correct\" answer to an annotator, i.e. we could provide a better, sharper version of the image, or more contextual information. Vagueness is non-defeasible: even if one were to know the exact number of grains of sand, the predicate \"is a heap\" would remain vague.", |
|
"cite_spans": [ |
|
{ |
|
"start": 287, |
|
"end": 313, |
|
"text": "Bhattacharya et al. (2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation and Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The interpretation of vague terms as described in \u00a7 1 typically occurs in a grounded setting; the question \"Is this a dog?\" is only meaningful in the context of some state of affairs (or depiction thereof). We focus on binary questions about images, taking examples from VQA and GQA; this ensures that the vague term is the question's focus, excluding openended queries like \"What is the old man doing?\" which only implicitly involve vagueness.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visually Grounded Vagueness", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We begin by isolating a number of vague descriptors (sunny, cloudy, adult, young, new, old) in the VQA and GQA datasets. We then use high-recall regular expressions to match questions from these descriptors in the development sets of both datasets, manually filtering the results to obtain high-precision examples. Here, we make the simplifying assumption that a group of predicates involving these terms, such as \"is x\", \"seems x\" and \"looks x\" are approximately equivalent and used interchangeably.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This process results in a variable number of questions per descriptor, with sunny and cloudy typically having far more representation. Given the size of the whole development sets, and the fact that the data presented is being used merely for analysis rather than for training models, we annotate between 32 and 264 examples, depending on the data availability for each predicate. 2 While the VQA development data contains 10 annotations per example, GQA does not, and thus, in order to verify the quality of the VQA annotations and to collect annotations for GQA, we solicited 10-way redundant annotations from Mechanical Turk, presenting annotators with a question and its corresponding image from the visionand-language dataset (e.g. \"Is it sunny?\"). 3 Rather than providing categorical labels (e.g. \"yes\", \"no\") workers were asked to use a slider bar ranging from \"no\" to \"yes\", whose values range from 0 to 100, using an interface inspired by Sakaguchi and Van Durme (2018) . Examples were provided in groups of 8. 4 The resulting annotations are normalized per annotator by the following formula x = (x \u2212 x min )/x max where x min and x max are the annotators minimum and maximum scores. This accounts for differences in slider bar usage by different annotators. Inter-annotator agreement is measured via majority voting, where an annotator is said to agree with others when their judgement falls on the same side of the slider bar scale (i.e. > 50, < 50). Using this metric, we exclude annotators with < 75% agreement. After exclusion, all predicates had > 90% average agreement. 5 . Vagueness and accuracy We begin by demonstrating that vagueness is not merely a theoretical problem: Fig. 2 shows that while the total accuracy of LXMERT (Tan and Bansal, 2019) is fairly high, it drops on all descriptors (except for \"old\" for GQA) when looking only at accuracy in the borderline regions. For VQA, we take advantage of the existing 10-way redundant annotations, defining borderline examples as those for which there was any disagreement between annotators, i.e. even if 9 annotators responded \"yes\" and one responded \"no\" for a given example, it is considered borderline. This results in 49.24% borderline cases. We find that for GQA, defining borderline examples as having mean normalized scores \u2208 [15.0, 85.0] yields roughly the same percentage (47.20% borderline).", |
|
"cite_spans": [ |
|
{ |
|
"start": 381, |
|
"end": 382, |
|
"text": "2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 754, |
|
"end": 755, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 948, |
|
"end": 978, |
|
"text": "Sakaguchi and Van Durme (2018)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1020, |
|
"end": 1021, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1587, |
|
"end": 1588, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1745, |
|
"end": 1767, |
|
"text": "(Tan and Bansal, 2019)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1692, |
|
"end": 1698, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
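{
"text": "To make the preceding annotation pipeline concrete, the following is a minimal Python sketch of the per-annotator normalization and the majority-vote agreement filter described above; the data layout (a dict mapping annotator ids to per-example slider scores) and all function names are illustrative assumptions, not the authors' released code.\n\n# Sketch of the per-annotator normalization and agreement filter described above.\n# Assumes 'raw' maps annotator id -> {example id -> slider score in [0, 100]}.\ndef normalize(scores):\n    # Follows the normalization as stated in the text: x' = (x - x_min) / x_max.\n    # (A standard min-max alternative would divide by (x_max - x_min) instead.)\n    lo, hi = min(scores.values()), max(scores.values())\n    return {ex: (s - lo) / hi if hi > 0 else 0.0 for ex, s in scores.items()}\n\ndef agreement_rate(annotator, raw):\n    # An annotator 'agrees' on an example when their score falls on the same side\n    # of the slider midpoint (50) as the majority of the other annotators.\n    agree, total = 0, 0\n    for ex, score in raw[annotator].items():\n        others = [raw[a][ex] for a in raw if a != annotator and ex in raw[a]]\n        if not others:\n            continue\n        majority_yes = sum(s > 50 for s in others) > len(others) / 2\n        agree += int((score > 50) == majority_yes)\n        total += 1\n    return agree / total if total else 0.0\n\ndef filter_and_normalize(raw, threshold=0.75):\n    # Keep only annotators with >= 75% agreement, then normalize their scores.\n    kept = {a: s for a, s in raw.items() if agreement_rate(a, raw) >= threshold}\n    return {a: normalize(s) for a, s in kept.items()}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data collection",
"sec_num": null
},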
|
{ |
|
"text": "The contrast between borderline and nonborderline regions is especially dramatic for VQA, with the minimum non-borderline accuracy being 99.67% for \"sunny,\" while the accuracy in the borderline region drops to 69.78%. Though the results are less dramatic for GQA, they generally trend in the same direction. We argue that, given that these borderline examples account for roughly half of the data examined, the relatively high aggregate performance obtained by models on binary questions in VQA and GQA may be partially attributed to an absence of vague terms rather than to the strength of the model. Conversely, given a shifted evaluation dataset with more vague terms, the performance would likely drop dramatically.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Vagueness in detail Having demonstrated that model performance is diminished on borderline cases, we seek to further explore the divergence in model and human behavior. Fig. 1 plots the mean human scores in the top plot, with examples ordered by their mean human rating. The bottom plot shows LXMERT output scores for the same examples. The human scores display a sigmoid shape, while the model scores are saturated at either 0 or 1. For the sake of space, the remaining plots are reported in Appendix B, and we constrain ourselves to a quantitative analysis to demonstrate that a similar trend holds across the remaining descriptors.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 175, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Following Item Response Theory (Reise et al., 2005; Lalor et al., 2016) -a modeling paradigm for psychological tests premised on variability among respondents -we posit a 2-parameter sigmoid response function given by 1 + exp \u2212 k * (x \u2212 x 0 ) \u22121 where k and x 0 are scale and shift parameters, respectively. This parameterization reflects the intuition that non-borderline examples are found near the spectrum's ends (0 and 100) while borderline examples form a curve in the spectrum's center. In other words, it defines an \"ideal\" curve in the sigmoid family that fits the data collected from annotators. In some cases, this curve is stretched, nearing a line, while in others it is more pronounced. We fit three separate logistic regressions: one to the mean of the annotator responses, one to the model response obtained from LXMERT, and a baseline fit against data drawn from a uniform distribution. The quality of the fit, measured by root mean squared error (RMSE) on 10% held-out data, repeated across 10 folds of cross-validation, is given in Fig. 3 . For both datasets, sigmoid functions fit to model predictions have an RMSE comparable to those fit to uniformly random data, while the functions fit to human data have errors an order of magnitude lower. Figure 3 : Mean RMSE from sigmoid fit to VQA and GQA data using 10-fold cross-validation. Human predictions result in a far better sigmoid fit, while model predictions have similar fit to data \u223c U(0, 1).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1051, |
|
"end": 1057, |
|
"text": "Fig. 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1264, |
|
"end": 1272, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
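{
"text": "A small illustrative sketch of the sigmoid fit and cross-validated RMSE computation described above; this is not the authors' code, and the choice of x as the rank position of each example (examples sorted by mean human rating, rescaled to [0, 1]) is an assumption.\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom sklearn.model_selection import KFold\n\ndef sigmoid(x, k, x0):\n    # 2-parameter response function: f(x) = (1 + exp(-k * (x - x0))) ** -1\n    return 1.0 / (1.0 + np.exp(-k * (x - x0)))\n\ndef cv_rmse(scores, n_folds=10, seed=0):\n    # 'scores' are per-example responses in [0, 1], already sorted by mean human rating.\n    y = np.asarray(scores, dtype=float)\n    x = np.linspace(0.0, 1.0, len(y))\n    errs = []\n    for train, test in KFold(n_folds, shuffle=True, random_state=seed).split(x):\n        (k, x0), _ = curve_fit(sigmoid, x[train], y[train], p0=[10.0, 0.5], maxfev=10000)\n        pred = sigmoid(x[test], k, x0)\n        errs.append(np.sqrt(np.mean((pred - y[test]) ** 2)))\n    return float(np.mean(errs))\n\n# Compare fit quality for human means, model probabilities, and a uniform baseline, e.g.:\n# cv_rmse(human_means), cv_rmse(model_probs),\n# cv_rmse(np.random.default_rng(0).uniform(size=len(human_means)))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data collection",
"sec_num": null
},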
|
{ |
|
"text": "This indicates that the remaining GQA and VQA predicates follow a similar pattern to the one seen in Fig. 1 . While model predictions often fall on the correct side of the middle threshold, as examples become borderline, some predictions become erratic while others are confidently misclassified. Note that this is doubly problematic: firstly, the model only makes use of a small region of the label space. While the output vocabulary includes entries such as \"partly cloudy\" and \"overcast,\" for all examples tested, the model assigns > 98% of its probability mass to \"yes\" and \"no.\"", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 107, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Even within this constrained assignment, the model has the possibility of hedging using the output logits (e.g. p(yes|x) = 0.40 etc.). Prima facie we might hope that, given a large categoricallylabeled dataset, the model would learn the correct output distribution, as Pavlick and Kwiatkowski (2019) put it, \"for free.\" We do not find this to be the case: the prediction generally heavily favors one label alone, posing problems for any downstream task as well as active learning setups using uncertainty sampling (Lewis and Catlett, 1994) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 514, |
|
"end": 539, |
|
"text": "(Lewis and Catlett, 1994)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In contrast, annotators display hedging between the labels, reliably using the slider-bar interface to equivocate between extremes in borderline cases. These results suggest that the first assumption described in \u00a7 2, namely that images can be identified as being in the extension of a descriptor or not (e.g. in the set of scenes described as \"cloudy\"), holds only at the ends of the example range, and is not warranted in the borderline region. In contrast, the training data which LXMERT sees makes the assumption that the descriptor either applies (examples with a \"yes\" label) or does not apply (examples labelled \"no\") in all regions; we see that this is perhaps too strong of an assumption when trying to capture the nuances of vague terms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note also that the annotators' standard error (grey band) is generally fairly low even in the central region, where we would expect greater disagreement. This trend holds across descriptors, and perhaps implies that the second assumption, that annotators can reliably recover the mapping between inputs and outputs, does to hold as long as the annotators are provided the proper interface for expressing their intuitions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "4 Text-only Vagueness \u00a7 3 explored predicates grounded in another representation of the world, namely images. However, much of NLP deals with text in isolation, without grounding to some external modality. In an ungrounded setting, it is unproductive to evaluate models on external knowledge that they would not have access to-thus, we cannot evaluate a textonly model's performance on vague predicates the same way as a grounded model's performance. In other words, we need to develop a paradigm which does not rely on knowledge about a state of the world, but rather on linguistic knowledge. This is precisely the analytic-synthetic distinction, with analytic truths being truths by virtue of meaning alone (e.g. \"a bachelor is an unmarried man\") and synthetic truths being those which require verification against a state of affairs (e.g. \"Garfield is a bachelor\"). To avoid evaluating our text-only models on their ability to reason against a world which they are not privy to, we restrict our analysis to analytic truths and falsehoods, which we construct by pairing words either with their true definition or with a distractor definition, creating statements that are analytically true and false. Recall from \u00a7 2 that Sentence T/F Mark journalism is newspapers and magazines collectively T T-shirt is an archaic term for clothing F T-shirt is a close-fitting pullover shirt T a teammate is someone who is under suspicion F Table 1 : Example sentences, with their label in the created dataset and corresponding color in Fig. 4 . even the predicates is true and is false may be seen as vague; there are statements which are only partially true or false, and we can speak meaningfully of some statements being truer than others.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1429, |
|
"end": 1436, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1525, |
|
"end": 1531, |
|
"text": "Fig. 4", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Following Ettinger et al. (2018) , these statements are created artificially, mitigating annotator bias. Definitions of the 2542 most frequent English nouns 6 are then obtained from WordNet (Miller, 1995; Fellbaum, 1998) using the NLTK interface (Bird, 2006) . By pairing a \"trigger\" word with its definition, we create an analytically true statement (c.f. row 3 in Table 1 ). In order to create analytically false statements, we pair the same word with a definition for a related but distinct term. A set of candidate terms is created recursively taking the hypernym of the trigger word's top wordsense 7 for three levels (i.e. the hyper-hyper-hypernym) and adding all its hyponyms, excluding the trigger's siblings. The best distractor candidate is chosen using lexical overlap, where the candidate with the lowest overlap with the true definition is chosen. Note that as a simplifying assumption we ignore polysemy here; it is possible that via polysemy the chosen distractor definition is not strictly analytically false. However, this result is unlikely given that human annotators reliably recognized distractor definitions. We expect that, while the examples are categorically labeled true and false, annotators will determine that certain statements fall into a borderline region between these extremes, corresponding to notions like \"partially true\" or \"mostly false.\" 8 Crucially, where in \u00a7 3 the vagueness was present in the question itself (i.e. the task was to determine whether the object in question, e.g. the sky, in the image fell into the extension of the vague term e.g. things that are cloudy) here it is in the label set; the task becomes determining whether the statement as a whole falls into the set of true statements. The data is split into 4000 train, 500 development, and 536 test sentences. For all triggers, both statements are found in the same split.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 32, |
|
"text": "Ettinger et al. (2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 204, |
|
"text": "(Miller, 1995;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 220, |
|
"text": "Fellbaum, 1998)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 246, |
|
"end": 258, |
|
"text": "(Bird, 2006)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 366, |
|
"end": 373, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
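{
"text": "The distractor-construction procedure above can be sketched roughly with the NLTK WordNet interface as follows; the candidate pool and the word-overlap measure here are simplifications and assumptions, not the exact implementation used to build the dataset.\n\nfrom nltk.corpus import wordnet as wn  # requires: nltk.download('wordnet')\n\ndef distractor_definition(trigger):\n    # Top (most frequent) noun synset for the trigger word and its true definition.\n    synsets = wn.synsets(trigger, pos=wn.NOUN)\n    if not synsets:\n        return None\n    top = synsets[0]\n    true_def = top.definition()\n    # Walk three hypernym levels up, then collect all hyponyms under that ancestor,\n    # excluding the trigger's own synset and its siblings.\n    ancestor = top\n    for _ in range(3):\n        hypers = ancestor.hypernyms()\n        if not hypers:\n            break\n        ancestor = hypers[0]\n    siblings = {h for s in top.hypernyms() for h in s.hyponyms()}\n    candidates = [s for s in ancestor.closure(lambda s: s.hyponyms())\n                  if s != top and s not in siblings]\n    if not candidates:\n        return None\n    # Pick the candidate whose definition overlaps least with the true definition.\n    overlap = lambda s: len(set(s.definition().split()) & set(true_def.split()))\n    return min(candidates, key=overlap).definition()\n\n# e.g. pairing 'T-shirt is ' + distractor_definition('t-shirt') yields an analytically\n# false statement; pairing the trigger with its true definition yields a true one.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data collection",
"sec_num": null
},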
|
{ |
|
"text": "96 sentences were sampled from the development set and annotated with 10-way redundancy by vetted crowdworkers on Mechanical Turk. Using a similar interface as in \u00a7 3, annotators were presented with sentences and asked to rate the sentence's truth using a sliding bar (ranging from 0 to 100) from false to true. In addition, an \"I don't know\" checkbox was provided to avoid forcing a choice. Sentences were presented in groups of 8. Additional details on the annotation interface can be found in Appendix A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data collection", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "While the text-only experiments also focus on examining vagueness, several important contrasts to \u00a7 3 must be drawn. In the visual setting, the entire LXMERT model was separately finetuned on the whole GQA and VQA train splits, and analysis examples were sourced from the development data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoders and Models", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In the text-only case, we do not have a pre-made dataset and construct our own. Due to the smaller size of our dataset, we have opted to only fine-tune the classification layer, freezing the weights of the contextualized encoders, unlike in the visual setting where we trained the entire model. This is far less computationally expensive, and allows us to expand our text-only analysis to a range of encoder types and model architectures. We examine three different contextualized encoders:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoders and Models", |
|
"sec_num": "4.1" |
|
}, |
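{
"text": "As a rough illustration of this setup, the sketch below freezes a pretrained contextualized encoder and trains only a small classification head; the HuggingFace model name and the head architecture are assumptions, and the learning rate follows the training details given in the appendix footnote.\n\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\n# Minimal sketch: keep the contextualized encoder frozen; train only the classifier head.\nencoder_name = 'bert-base-uncased'  # assumption; roberta-base or xlnet-base-cased also fit\ntokenizer = AutoTokenizer.from_pretrained(encoder_name)\nencoder = AutoModel.from_pretrained(encoder_name)\nfor p in encoder.parameters():\n    p.requires_grad = False  # encoder weights stay fixed\n\nhidden = encoder.config.hidden_size\nclassifier = torch.nn.Sequential(\n    torch.nn.Linear(hidden, hidden),\n    torch.nn.ReLU(),\n    torch.nn.Linear(hidden, 2),  # analytically true vs. false\n)\noptimizer = torch.optim.Adam(classifier.parameters(), lr=1e-4)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Encoders and Models",
"sec_num": "4.1"
},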
|
{ |
|
"text": "BERT BERT (Devlin et al., 2019 ) is a transformer-based model which uses a word's con-text to predict its identity; during training, words in the input are randomly replaced with a [MASK] token; the model then predicts masked words based on their contexts-a cloze-style task known as masked language modeling (MLM). BERT also uses a next-sentence prediction objective. RoBERTa RoBERTa uses roughly the same methodology as BERT, but trains the model for more epochs with larger batch sizes while removing the next-sentence prediction task. XLNet While traditional language models only consider one factorization (in the forwards or the backwards direction), Yang et al. (2019) maximize the expected log-likelihood with respect to all factorizations input's joint probability.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 30, |
|
"text": "(Devlin et al., 2019", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 675, |
|
"text": "Yang et al. (2019)", |
|
"ref_id": "BIBREF43" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoders and Models", |
|
"sec_num": "4.1" |
|
}, |
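{
"text": "As a hedged illustration of the cloze-style MLM objective described above (using the HuggingFace fill-mask pipeline; the model name and example sentence are arbitrary choices, not tied to this paper):\n\nfrom transformers import pipeline\n\n# The masked language model predicts the identity of a masked word from its context.\nfill = pipeline('fill-mask', model='bert-base-uncased')\nfor cand in fill('The sky is [MASK] today.', top_k=3):\n    print(cand['token_str'], round(cand['score'], 3))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Encoders and Models",
"sec_num": "4.1"
},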
|
{ |
|
"text": "Drawing on the observations of Warstadt et al. (2019) that probing results can change dramatically depending on how an encoder is probed, we introduce three probing classifiers: Mean-pool The mean-pool classifier takes the average across all dimensions of the encoder output at each input token, yielding one vector for the whole sentence. This vector is then passed to a 2-layer multi-layer perceptron (MLP) with ReLU activations, which produces a classification over the 2D output space. Sequence The sequence classifier uses the encoder representation at the index of the [CLS] token, which it then passes to a 2-layer MLP with twice as many hidden units as input units. Bilinear This classifier splits the probing prompt into a trigger word (e.g. \"bachelor\") and a definition (e.g. \"an unmarried man\"); it encodes both into vectors, mean-pooling the definition to produce two vectors, which are projected through two linear layers. The projected representations x trig and x def are then passed through a bilinear layer, given by f (x trig , x def ) = x T trig A x def , where A is a 3-dimensional learned parameter.", |
|
"cite_spans": [ |
|
{ |
|
"start": 575, |
|
"end": 580, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoders and Models", |
|
"sec_num": "4.1" |
|
}, |
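{
"text": "A condensed PyTorch sketch of the three probing classifiers described above; hidden sizes, the exact pooling of the trigger, and other details are assumptions rather than a faithful reproduction of the authors' implementation.\n\nimport torch\nimport torch.nn as nn\n\nclass MeanPoolProbe(nn.Module):\n    # Average frozen encoder states over tokens, then a 2-layer MLP with ReLU over 2 classes.\n    def __init__(self, dim):\n        super().__init__()\n        self.mlp = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(), nn.Linear(dim, 2))\n\n    def forward(self, states):  # states: (batch, tokens, dim)\n        return self.mlp(states.mean(dim=1))\n\nclass ClsProbe(nn.Module):\n    # Use the encoder state at the [CLS] position (index 0 for BERT-style inputs).\n    def __init__(self, dim):\n        super().__init__()\n        self.mlp = nn.Sequential(nn.Linear(dim, 2 * dim), nn.ReLU(), nn.Linear(2 * dim, 2))\n\n    def forward(self, states):\n        return self.mlp(states[:, 0])\n\nclass BilinearProbe(nn.Module):\n    # Project trigger and (mean-pooled) definition encodings, then combine them bilinearly.\n    def __init__(self, dim):\n        super().__init__()\n        self.proj_trig = nn.Linear(dim, dim)\n        self.proj_def = nn.Linear(dim, dim)\n        self.bilinear = nn.Bilinear(dim, dim, 2)  # learned 3-dimensional weight tensor\n\n    def forward(self, trig_states, def_states):\n        x_trig = self.proj_trig(trig_states.mean(dim=1))\n        x_def = self.proj_def(def_states.mean(dim=1))\n        return self.bilinear(x_trig, x_def)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Encoders and Models",
"sec_num": "4.1"
},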
|
{ |
|
"text": "Control Tasks Following Hewitt and Liang (2019), we construct control tasks for all of our models and encoders. A control task is one where labels and inputs are paired randomly; the purpose of such a task is to disentangle what portion of the probing classifier's performance can be attributed to the strength of the classifier, and what portion is present in the representation. 9", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoders and Models", |
|
"sec_num": "4.1" |
|
}, |
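{
"text": "Following the description above, a control task can be sketched by pairing each input with a random but fixed label, independent of its true label (in the spirit of Hewitt and Liang); the toy dataset and function name below are assumptions for illustration only.\n\nimport hashlib\n\ndef control_label(sentence, num_labels=2):\n    # Deterministically map each sentence to a random-looking but fixed label,\n    # independent of its true label; a probe that scores well on such labels is\n    # exploiting classifier capacity rather than information in the representation.\n    digest = hashlib.md5(sentence.encode('utf-8')).hexdigest()\n    return int(digest, 16) % num_labels\n\n# Toy example: build the control version of a (sentence, label) dataset.\ndataset = [('a bachelor is an unmarried man', 1), ('a bachelor is a married man', 0)]\ncontrol_data = [(sent, control_label(sent)) for sent, _true_label in dataset]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Encoders and Models",
"sec_num": "4.1"
},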
|
{ |
|
"text": "We find that our control classifiers perform randomly, indicating our task has very low sensitivity. Fig. 5 shows the test accuracies of all (non-control) models in all settings. We see that all models fall well below human performance, but well above the random baseline of 50%. Among the probing methods, [CLS] pooling slightly outperforms meanpooling. The bilinear method consistently underperforms the pooling methods, suggesting that the gap between human and model performance is not due to malformed prompts (e.g. incorrect articles in the definition or trigger phrase). Appendix C gives some examples and model predictions. Human annotators are able to perform the task with high reliability, achieving an accuracy of 88.54 with majority voting. Fig. 4 shows that certain sentences are easily classified as either true or false, while a smaller number of sentences are considered borderline. A qualitative analysis of these sentences reveals that they typically fall into two categories: sentences where the trigger described is very abstract (e.g. \"a separation is the state of lacking unity\") and those where the distractor definition is very closely related to the trigger (e.g. \"a baby is a person's brother or sister\"). Intuitively, both of these phenomena can make a sentence only partially true or false.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 107, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 754, |
|
"end": 760, |
|
"text": "Fig. 4", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "While Fig. 5 suggests the models are performing reasonably well in the aggregate, Fig. 4 demonstrates a similar trend to those seen in \u00a7 3, showing that the classification patterns of humans differ drastically from those of the best model, as illustrated by the overlaid examples. We also see the same overconfidence in the output distribution of the model, with predictions saturating at either end of the simplex. ting a 2-parameter logistic regression to the aggregate human scores, the model predictions, and samples of a uniformly-distributed random variable, computing the RMSE between the best-fit sigmoid and the data. Across all models and all encoder types, we see that the RMSE of a sigmoid fit to the model predictions is close to or higher than the RMSE of a sigmoid fit to uniformly random data (RMSE random = 0.326), as evidenced by the overlaid red horizontal line, while the sigmoid fit to human performance has a far lower RSME (RMSE human = 0.051). This quantitatively reinforces the qualitative difference seen in Fig. 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 12, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 82, |
|
"end": 88, |
|
"text": "Fig. 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1034, |
|
"end": 1040, |
|
"text": "Fig. 4", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Human-model divergence In similar vein to our work, Pavlick and Kwiatkowski (2019) observe that human annotators consistently disagree on natural language inference (NLI) labels, and that the disagreement cannot be attributed to a lack of annotations. They similarly find that models do not implicitly learn to capture human uncertainty from categorical data. In contrast, our work seeks to pinpoint vagueness as a cause for some of the difference in behavior. 10 Other work has looked at annotating data to accommodate the kinds of disagreements seen in Pavlick and Kwiatkowski. Chen et al. (2020) extends the EASL framework (Sakaguchi and Van Durme, 2018) for efficiently eliciting reliable scalar judgements from crowdworkers to NLI, ob-taining scalar NLI judgements rather than categorical labels. In a similar context, argue that for tasks involving plausibility, the use of cross-entropy loss drives model predictions to the extremes of the simplex, and demonstrate the benefits of shifting to a margin-based loss on the Choice of Plausible Alternatives (Roemmele et al., 2011) task. These results dovetail with our observations regarding various models' output distributions, especially in the text-only setting, where our task is very similar to tasks measuring plausibility.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 82, |
|
"text": "Pavlick and Kwiatkowski (2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 463, |
|
"text": "10", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 598, |
|
"text": "Pavlick and Kwiatkowski. Chen et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 626, |
|
"end": 657, |
|
"text": "(Sakaguchi and Van Durme, 2018)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1060, |
|
"end": 1083, |
|
"text": "(Roemmele et al., 2011)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "While Pavlick and Kwiatkowski (2019) focus on NLI data, Bhattacharya et al. (2019) have noted that similar disagreements exist in the visual domain, specifically on the VQA data set, where they find that certain image-question pairs are less reliably answered than others. The ontology they propose to classify these images includes ambiguity and under-specification, but not vagueness.", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 36, |
|
"text": "Pavlick and Kwiatkowski (2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 56, |
|
"end": 82, |
|
"text": "Bhattacharya et al. (2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Vagueness Past work in vagueness has often focused on modeling it as a phenomenon, while our work is concerned with analyzing model performance on vague predicates, rather than capturing the semantics of vague predicates, which has been the focus of previous work such as Meo et al. (2014) and McMahan and Stone (2015) . Although color terms provide a particularly rich substrate for modeling the semantics of vague terms, we have chosen to exclude them as we feel they demand a level of psychophysical analysis beyond the scope of this work. This work deals instead with gradable terms, following work such as Fern\u00e1ndez and Larsson (2014) , who present a type-theory record account of vagueness for learning the semantics of gradable adjectives, DeVault and Stone (2004) , who use vagueness to illustrate the need for context in a dialog-driven drawing task, and Lassiter and Goodman (2017), who introduce a Bayesian pragmatic model of gradable adjective usage. These lines of previous work draw on the contextualist account of vagueness, holding that the meaning of vague predicates shifts with respect to the interests of the parties communicating, a notion that naturally expresses itself in rational pragmatic models of dialog. Rather than modeling vagueness, we use it as a tool to examine model behavior, focusing on single interactions instead of a dialog. We refer the reader to Juhl and Loomis (2009) for a full account of the analytic/synthetic distinction. Text-only semantic probing The challenge of analyzing the semantic content of sentence en-codings precedes the contextual encoders studied herein; Ettinger et al. (2016) introduce a suite of simple classification tasks for probing the compositionality of LSTM-based sentence embeddings, while Conneau et al. (2018) present 10 linguistically-motivated probing tasks, including 3 semantic tasks, for LSTM-and CNN-based sentence embeddings. Ettinger et al. (2018) create a set of artificial prompts, as done in this work, to probe the compositionality of InferSent (Conneau et al., 2017) , while Dasgupta et al. (2018) use NLI-style prompts for the same purpose.", |
|
"cite_spans": [ |
|
{ |
|
"start": 272, |
|
"end": 289, |
|
"text": "Meo et al. (2014)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 318, |
|
"text": "McMahan and Stone (2015)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 611, |
|
"end": 639, |
|
"text": "Fern\u00e1ndez and Larsson (2014)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 747, |
|
"end": 771, |
|
"text": "DeVault and Stone (2004)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1388, |
|
"end": 1410, |
|
"text": "Juhl and Loomis (2009)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1616, |
|
"end": 1638, |
|
"text": "Ettinger et al. (2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1762, |
|
"end": 1783, |
|
"text": "Conneau et al. (2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1907, |
|
"end": 1929, |
|
"text": "Ettinger et al. (2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 2031, |
|
"end": 2053, |
|
"text": "(Conneau et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 2062, |
|
"end": 2084, |
|
"text": "Dasgupta et al. (2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Similar probing suites have been proposed since the advent of contextual encoders; Tenney et al. (2019b) propose a set of edge-probing tasks that examine semantic content, and Tenney et al. (2019a) find that semantic information is typically encoded at higher transformer layers. Presenting a suite of negative polarity item-based tasks, Warstadt et al. (2019) expand on the observation that different transformer layers account for different phenomena, noting that additionally, the manner in which a probing task is framed often makes a large impact.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 104, |
|
"text": "Tenney et al. (2019b)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 197, |
|
"text": "Tenney et al. (2019a)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 360, |
|
"text": "Warstadt et al. (2019)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Dictionary Embeddings Dictionary embeddings, as described by Hill et al. (2016) , use dictionary resources to learn a mapping from phrases to word vectors. Dictionaries have also been used with a view to augmenting the semantic information in word embeddings, as in Tissier et al. (2017) and Bosc and Vincent (2018) . In contrast to these approaches, we use definitions to investigate the semantic content of existing mappings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 79, |
|
"text": "Hill et al. (2016)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 266, |
|
"end": 287, |
|
"text": "Tissier et al. (2017)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 315, |
|
"text": "Bosc and Vincent (2018)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We have identified clashes between the assumptions made under the current NLP paradigm and the realities of language use by focusing on the phenomenon of vagueness. By isolating a subset of examples from VQA and GQA involving vagueness, we were able to pinpoint some key divergences between model and human behavior which result in lower model performance. We then created an artificial text-only dataset, controlling for world knowledge, which we used to contrast multiple models building on multiple contextualized encoders, finding similar human-model contrasts. In closing, we would like to advocate for the broader use of concepts from the philosophy of language, such as vagueness, in challenging current models and providing additional insights beyond aggregate statistics and leaderboards.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "A Data Collection Figure 12 shows that on certain examples human annotators vary in their truth judgements, with some sentences receiving a high score (i.e. \"True\") from certain annotators and a low score (i.e. \"False\") from others. Further inspection reveals that many of the highest-variance examples have one annotator who is an extreme outlier. Figure 7 shows the MechanicalTurk annotator interface for collecting VQA and GQA annotations. The task was only available to annotators in the US with an approval rating > 98% and more than 500 recorded HITs. Instructions asked annotators to respond to the questions by using the sliding bar. They were provided with a comment box to use in case any issues arose.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 27, |
|
"text": "Figure 12", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 357, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Similarly, Figure 8 shows the interface for collecting text-only annotations. Here, the task was only shown to annotators from a list of reliable workers. Instructions asked annotators to rate how true a sentence was, and told that sentences may be true or false. They were instructed to use the \"I don't know\" checkbox in cases where they did not know a word in the statement. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 19, |
|
"text": "Figure 8", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Figures 9 and 10 show human annotations plotted against model predictions for all of the predicates examined. In all cases, we see major divergences between human and model data, as quantified in Fig. 3 . We also see that the standard error between annotators is fairly low. Furthermore, we see similar trends between descriptors across the two datasets, with \"new\" being skewed towards the higher end for both. Figure 11 verifies that for the descriptors examined (\"sunny\" and \"cloudy\") the mean score obtained from annotators on Mechanical Turk and the mean score from the VQA development roughly correspond, justifying the use of the VQA development data in \u00a7 3. However, we do note some divergence between the two annotation formats, likely due to the forced choice presented to the original VQA annotators. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 202, |
|
"text": "Fig. 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 412, |
|
"end": 421, |
|
"text": "Figure 11", |
|
"ref_id": "FIGREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Plots", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "It may still be incompatible with log-likelihood. Treating ordinal many-valued logic as a k-way classification problem requires that all values be equidistant, i.e. predicting a value of 1/5 when the true value is 4/5 is as bad as rating it 3/5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that for some predicates (e.g. sunny and cloudy, more data was available.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Since we were merely verifying the data quality for VQA, we only ran two descriptors: \"sunny\" and \"cloudy\".4 c.f. Appendix A for more on the collection protocol. 5 All data is available at website.com", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.wordfrequency.info 7 Based on pilot evaluations, we exclude chemistry-related wordsenses, as their definitions often contain low-frequency technical terms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that this conceptualization of truth diverges from that of classical logic, but may be more faithful to actual usage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "All models are trained for 100 epochs with the Adam optimizer using a learning rate of 0.0001. The best model was chosen by validation performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We examined high-disagreement examples from the data released by Pavlick and Kwiatkowski, which largely seem not to be caused by vagueness except for some examples from JOCI(Zhang et al., 2017), e.g. P: \"I loved apple sauce\", H: \"The sauce is a condiment\" may have high disagreement due to vagueness in the predicate isACondiment(x)\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "VQA: Visual Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "Stanislaw", |
|
"middle": [], |
|
"last": "Antol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aishwarya", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiasen", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"Lawrence" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Computer Vision (ICCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Mar- garet Mitchell, Dhruv Batra, C. Lawrence Zitnick, and Devi Parikh. 2015. VQA: Visual Question An- swering. In International Conference on Computer Vision (ICCV).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Why does a visual question have different answers?", |
|
"authors": [ |
|
{ |
|
"first": "Nilavra", |
|
"middle": [], |
|
"last": "Bhattacharya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danna", |
|
"middle": [], |
|
"last": "Gurari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE International Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4271--4280", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nilavra Bhattacharya, Qing Li, and Danna Gurari. 2019. Why does a visual question have different an- swers? In Proceedings of the IEEE International Conference on Computer Vision, pages 4271-4280.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Nltk: the natural language toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the COLING/ACL on Interactive presentation sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "69--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven Bird. 2006. Nltk: the natural language toolkit. In Proceedings of the COLING/ACL on Interactive presentation sessions, pages 69-72. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Auto-encoding dictionary definitions into consistent word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Bosc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1522--1532", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Bosc and Pascal Vincent. 2018. Auto-encoding dictionary definitions into consistent word embed- dings. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1522-1532.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Uncertain natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Tongfei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengping", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Poliak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keisuke", |
|
"middle": [], |
|
"last": "Sakaguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8772--8779", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.774" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tongfei Chen, Zhengping Jiang, Adam Poliak, Keisuke Sakaguchi, and Benjamin Van Durme. 2020. Un- certain natural language inference. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8772-8779, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Supervised learning of universal sentence representations from natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "670--680", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo\u00efc Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing, pages 670-680, Copen- hagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "What you can cram into a single &!#* vector: Probing sentence embeddings for linguistic properties", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "German", |
|
"middle": [], |
|
"last": "Kruszewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACL 2018-56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2126--2136", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, German Kruszewski, Guillaume Lam- ple, Lo\u00efc Barrault, and Marco Baroni. 2018. What you can cram into a single &!#* vector: Probing sen- tence embeddings for linguistic properties. In ACL 2018-56th Annual Meeting of the Association for Computational Linguistics, volume 1, pages 2126- 2136. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Evaluating compositionality in sentence embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Ishita", |
|
"middle": [], |
|
"last": "Dasgupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Demi", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stuhlm\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel J", |
|
"middle": [], |
|
"last": "Gershman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah D", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.04302" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ishita Dasgupta, Demi Guo, Andreas Stuhlm\u00fcller, Samuel J Gershman, and Noah D Goodman. 2018. Evaluating compositionality in sentence embed- dings. arXiv preprint arXiv:1802.04302.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Interpreting vague utterances in context", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Devault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Stone", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "COLING 2004: Proceedings of the 20th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1247--1253", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David DeVault and Matthew Stone. 2004. Interpret- ing vague utterances in context. In COLING 2004: Proceedings of the 20th International Conference on Computational Linguistics, pages 1247-1253.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Assessing composition in sentence vector representations", |
|
"authors": [ |
|
{ |
|
"first": "Allyson", |
|
"middle": [], |
|
"last": "Ettinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Elgohary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Phillips", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1790--1801", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allyson Ettinger, Ahmed Elgohary, Colin Phillips, and Philip Resnik. 2018. Assessing composition in sen- tence vector representations. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1790-1801.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Probing for semantic evidence of composition by means of simple classification tasks", |
|
"authors": [ |
|
{ |
|
"first": "Allyson", |
|
"middle": [], |
|
"last": "Ettinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Elgohary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 1st Workshop on Evaluating Vector-Space Representations for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "134--139", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W16-2524" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allyson Ettinger, Ahmed Elgohary, and Philip Resnik. 2016. Probing for semantic evidence of composition by means of simple classification tasks. In Proceed- ings of the 1st Workshop on Evaluating Vector-Space Representations for NLP, pages 134-139, Berlin, Germany. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Wordnet: An electronic lexical database cambridge", |
|
"authors": [ |
|
{ |
|
"first": "Christiane", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christiane Fellbaum. 1998. Wordnet: An electronic lexical database cambridge. MA: MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Vagueness and learning: A type-theoretic approach", |
|
"authors": [ |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Fern\u00e1ndez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Staffan", |
|
"middle": [], |
|
"last": "Larsson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Third Joint Conference on Lexical and Computational Semantics (* SEM 2014)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "151--159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raquel Fern\u00e1ndez and Staffan Larsson. 2014. Vague- ness and learning: A type-theoretic approach. In Proceedings of the Third Joint Conference on Lex- ical and Computational Semantics (* SEM 2014), pages 151-159.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Making the V in VQA matter: Elevating the role of image understanding in Visual Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "Yash", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tejas", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Summers-Stay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. 2017. Making the V in VQA matter: Elevating the role of image under- standing in Visual Question Answering. In Confer- ence on Computer Vision and Pattern Recognition (CVPR).", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Shifting sands: An interestrelative theory of vagueness. Philosophical topics", |
|
"authors": [ |
|
{ |
|
"first": "Delia", |
|
"middle": [], |
|
"last": "Graff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "45--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Delia Graff. 2000. Shifting sands: An interest- relative theory of vagueness. Philosophical topics, 28(1):45-81.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Designing and interpreting probes with control tasks", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Hewitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2733--2743", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Hewitt and Percy Liang. 2019. Designing and interpreting probes with control tasks. In Proceed- ings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Inter- national Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 2733-2743.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Learning to understand phrases by embedding the dictionary", |
|
"authors": [ |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "17--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felix Hill, Kyunghyun Cho, Anna Korhonen, and Yoshua Bengio. 2016. Learning to understand phrases by embedding the dictionary. Transactions of the Association for Computational Linguistics, 4:17-30.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Gqa: A new dataset for real-world visual reasoning and compositional question answering", |
|
"authors": [ |
|
{ |
|
"first": "Drew A", |
|
"middle": [], |
|
"last": "Hudson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6700--6709", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Drew A Hudson and Christopher D Manning. 2019. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceed- ings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6700-6709.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The paradox of the heap", |
|
"authors": [ |
|
{ |
|
"first": "Hans", |
|
"middle": [], |
|
"last": "Kamp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1981, |
|
"venue": "Aspects of Philosophical Logic", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "225--277", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hans Kamp. 1981. The paradox of the heap. In Aspects of Philosophical Logic, pages 225-277. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Building an evaluation scale using item response theory", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Lalor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "648--657", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1062" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John P. Lalor, Hao Wu, and Hong Yu. 2016. Build- ing an evaluation scale using item response theory. In Proceedings of the 2016 Conference on Empiri- cal Methods in Natural Language Processing, pages 648-657, Austin, Texas. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Adjectival vagueness in a bayesian model of interpretation", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Lassiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Noah D Goodman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Synthese", |
|
"volume": "194", |
|
"issue": "10", |
|
"pages": "3801--3836", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Lassiter and Noah D Goodman. 2017. Adjecti- val vagueness in a bayesian model of interpretation. Synthese, 194(10):3801-3836.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Heterogeneous uncertainty sampling for supervised learning", |
|
"authors": [ |
|
{ |
|
"first": "David D", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Catlett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Machine learning proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--156", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David D Lewis and Jason Catlett. 1994. Heteroge- neous uncertainty sampling for supervised learning. In Machine learning proceedings 1994, pages 148- 156. Elsevier.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Learning to rank for plausible plausibility", |
|
"authors": [ |
|
{ |
|
"first": "Zhongyang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tongfei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4818--4823", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhongyang Li, Tongfei Chen, and Benjamin Van Durme. 2019. Learning to rank for plausi- ble plausibility. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4818-4823.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A bayesian model of grounded color semantics", |
|
"authors": [ |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Mcmahan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Stone", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "103--115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brian McMahan and Matthew Stone. 2015. A bayesian model of grounded color semantics. Transactions of the Association for Computational Linguistics, 3:103-115.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Generating and resolving vague color references", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Meo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Mcmahan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Stone", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 18th Workshop on the Semantics and Pragmatics of Dialogue (SemDial)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "107--115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Meo, Brian McMahan, and Matthew Stone. 2014. Generating and resolving vague color refer- ences. In Proceedings of the 18th Workshop on the Semantics and Pragmatics of Dialogue (SemDial), pages 107-115.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Wordnet: a lexical database for english", |
|
"authors": [ |
|
{ |
|
"first": "George A", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Communications of the ACM", |
|
"volume": "38", |
|
"issue": "11", |
|
"pages": "39--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George A Miller. 1995. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39- 41.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Inherent disagreements in human textual inferences", |
|
"authors": [ |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "677--694", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ellie Pavlick and Tom Kwiatkowski. 2019. Inherent disagreements in human textual inferences. Transac- tions of the Association for Computational Linguis- tics, 7:677-694.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Vagueness without paradox", |
|
"authors": [ |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Raffman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "The Philosophical Review", |
|
"volume": "103", |
|
"issue": "1", |
|
"pages": "41--74", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diana Raffman. 1994. Vagueness without paradox. The Philosophical Review, 103(1):41-74.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Item response theory: Fundamentals, applications, and promise in psychological research", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Reise", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Ainsworth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Haviland", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Current Directions in Psychological Science", |
|
"volume": "14", |
|
"issue": "2", |
|
"pages": "95--101", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1111/j.0963-7214.2005.00342.x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven P. Reise, Andrew T. Ainsworth, and Mark G. Haviland. 2005. Item response theory: Fundamen- tals, applications, and promise in psychological re- search. Current Directions in Psychological Sci- ence, 14(2):95-101.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Choice of plausible alternatives: An evaluation of commonsense causal reasoning", |
|
"authors": [ |
|
{ |
|
"first": "Melissa", |
|
"middle": [], |
|
"last": "Roemmele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cosmin Adrian", |
|
"middle": [], |
|
"last": "Bejan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew S", |
|
"middle": [], |
|
"last": "Gordon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "AAAI spring symposium: logical formalizations of commonsense reasoning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "90--95", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melissa Roemmele, Cosmin Adrian Bejan, and An- drew S Gordon. 2011. Choice of plausible alterna- tives: An evaluation of commonsense causal reason- ing. In AAAI spring symposium: logical formaliza- tions of commonsense reasoning, pages 90-95.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Efficient online scalar annotation with bounded support", |
|
"authors": [ |
|
{ |
|
"first": "Keisuke", |
|
"middle": [], |
|
"last": "Sakaguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "208--218", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keisuke Sakaguchi and Benjamin Van Durme. 2018. Efficient online scalar annotation with bounded sup- port. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 208-218.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Vagueness in context", |
|
"authors": [ |
|
{ |
|
"first": "Stewart", |
|
"middle": [], |
|
"last": "Shapiro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stewart Shapiro. 2006. Vagueness in context. Oxford University Press on Demand.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Vagueness and contradiction", |
|
"authors": [ |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Sorensen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roy Sorensen. 2001. Vagueness and contradiction. Clarendon Press.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Vagueness", |
|
"authors": [ |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Sorensen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "The Stanford Encyclopedia of Philosophy", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roy Sorensen. 2018. Vagueness. In Edward N. Zalta, editor, The Stanford Encyclopedia of Philosophy, summer 2018 edition. Metaphysics Research Lab, Stanford University.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Lxmert: Learning cross-modality encoder representations from transformers", |
|
"authors": [ |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5103--5114", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hao Tan and Mohit Bansal. 2019. Lxmert: Learning cross-modality encoder representations from trans- formers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 5103-5114.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "BERT rediscovers the classical NLP pipeline", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Tenney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4593--4601", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1452" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019a. BERT rediscovers the classical NLP pipeline. In Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4593- 4601, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "What do you learn from context? probing for sentence structure in contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Tenney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Berlin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Poliak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Mccoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Najoung", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian Tenney, Patrick Xia, Berlin Chen, Alex Wang, Adam Poliak, R Thomas McCoy, Najoung Kim, Benjamin Van Durme, Sam Bowman, Dipanjan Das, and Ellie Pavlick. 2019b. What do you learn from context? probing for sentence structure in contextu- alized word representations. In International Con- ference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Dict2vec: Learning word embeddings using lexical dictionaries", |
|
"authors": [ |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Tissier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Gravier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amaury", |
|
"middle": [], |
|
"last": "Habrard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "254--263", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julien Tissier, Christopher Gravier, and Amaury Habrard. 2017. Dict2vec: Learning word embed- dings using lexical dictionaries. In Proceedings of the 2017 Conference on Empirical Methods in Natu- ral Language Processing, pages 254-263.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Investigating bert's knowledge of language: Five analysis methods with npis", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Warstadt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ioana", |
|
"middle": [], |
|
"last": "Grosu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hagen", |
|
"middle": [], |
|
"last": "Blix", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yining", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Alsop", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shikha", |
|
"middle": [], |
|
"last": "Bordia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haokun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alicia", |
|
"middle": [], |
|
"last": "Parrish", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2870--2880", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Warstadt, Yu Cao, Ioana Grosu, Wei Peng, Ha- gen Blix, Yining Nie, Anna Alsop, Shikha Bordia, Haokun Liu, Alicia Parrish, et al. 2019. Investi- gating bert's knowledge of language: Five analysis methods with npis. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 2870-2880.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.08237" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretrain- ing for language understanding. arXiv preprint arXiv:1906.08237.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Ordinal common-sense inference", |
|
"authors": [ |
|
{ |
|
"first": "Sheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Duh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Average annotator scores and model scores for questions containing vague terms on the VQA dataset", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "379--395", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sheng Zhang, Rachel Rudinger, Kevin Duh, and Ben- jamin Van Durme. 2017. Ordinal common-sense in- ference. Transactions of the Association for Compu- tational Linguistics, 5:379-395. 10: Average annotator scores and model scores for questions containing vague terms on the VQA dataset.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Accuracy of LXMERT on VQA and GQA Yes/No questions per predicate is highest for nonborderline examples, but drops in \"borderline\" regions.", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Top: mean truth score given by humans on 96 statements. False statements colored red, true blue; statements from Table 1 overlaid. Bottom: P (true) assigned by the best probing classifier (XLNet + [CLS]).", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Test accuracy across encoders and probing methods; all models perform well above chance.", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Fig. 6further reinforces this; here, we perform the same analysis as in \u00a7 3, fit-10-fold cross-validated RMSE against model of 2-parameter sigmoid against model predictions from each encoder and model pairing. RMSE to human performance (green line, bottom) and against random data (red line, top) are overlaid. RMSE to model predictions is close to or worse than to random data.", |
|
"uris": null |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Mechanical Turk annotation template for visual annotations.", |
|
"uris": null |
|
}, |
|
"FIGREF6": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Mechanical Turk annotation template for text annotations.", |
|
"uris": null |
|
}, |
|
"FIGREF7": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Manual verification of VQA plots shows that Mechanical Turker's judgments largely correspond to those present in the development set, with some divergence.", |
|
"uris": null |
|
}, |
|
"FIGREF8": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Human means and quartiles for examples ranked by average score", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"text": "contains 28 example sentences from the validation set, with human classifications derived by majority voting over the annotators who did not use the \"I don't know\" box, as well as classifications obtained by the [CLS] model." |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"text": "Sentences, labels, human means and model logits for 28 sample validation examples." |
|
} |
|
} |
|
} |
|
} |