|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:13:32.547119Z" |
|
}, |
|
"title": "Comparing Probabilistic, Distributional and Transformer-Based Models on Logical Metonymy Interpretation", |
|
"authors": [ |
|
{ |
|
"first": "Giulia", |
|
"middle": [], |
|
"last": "Rambelli", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Pisa", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Emmanuele", |
|
"middle": [], |
|
"last": "Chersoni", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Kong Polytechnic University", |
|
"location": { |
|
"settlement": "Hong" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Pisa", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Philippe", |
|
"middle": [], |
|
"last": "Blache", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Marseille University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Chu-Ren", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Kong Polytechnic University", |
|
"location": { |
|
"settlement": "Hong" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In linguistics and cognitive science, Logical metonymies are defined as type clashes between an event-selecting verb and an entitydenoting noun (e.g. The editor finished the article), which are typically interpreted by inferring a hidden event (e.g. reading) on the basis of contextual cues. This paper tackles the problem of logical metonymy interpretation, that is, the retrieval of the covert event via computational methods. We compare different types of models, including the probabilistic and the distributional ones previously introduced in the literature on the topic. For the first time, we also tested on this task some of the recent Transformer-based models, such as BERT, RoBERTa, XLNet, and GPT-2. Our results show a complex scenario, in which the best Transformer-based models and some traditional distributional models perform very similarly. However, the low performance on some of the testing datasets suggests that logical metonymy is still a challenging phenomenon for computational modeling.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In linguistics and cognitive science, Logical metonymies are defined as type clashes between an event-selecting verb and an entitydenoting noun (e.g. The editor finished the article), which are typically interpreted by inferring a hidden event (e.g. reading) on the basis of contextual cues. This paper tackles the problem of logical metonymy interpretation, that is, the retrieval of the covert event via computational methods. We compare different types of models, including the probabilistic and the distributional ones previously introduced in the literature on the topic. For the first time, we also tested on this task some of the recent Transformer-based models, such as BERT, RoBERTa, XLNet, and GPT-2. Our results show a complex scenario, in which the best Transformer-based models and some traditional distributional models perform very similarly. However, the low performance on some of the testing datasets suggests that logical metonymy is still a challenging phenomenon for computational modeling.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The phenomenon of logical metonymy is defined as a type clash between an event-selecting metonymic verb (e.g., begin) and an entity-denoting nominal object (e.g., the book), which triggers the recovery of a hidden event (e.g., reading). Logical metonymies have been widely studied, on the one hand, in theoretical linguistics as they represent a challenge to traditional theories of compositionality (Asher, 2015; Pustejovsky and Batiukova, 2019) . On the other hand, they received extensive attention in cognitive research on human sentence processing as they determine extra processing costs during online sentence comprehension (McElree et al., 2001; Traxler et al., 2002) , apparently related to \"the deployment of operations to construct a semantic representation of the event\" (Frisson and McElree, 2008) . 1 Logical metonymy has also been explained in terms of the words-as-cues hypothesis proposed by Jeffrey Elman (Elman, 2009 (Elman, , 2014 . This hypothesis relies on the experimental evidence that human semantic memory stores knowledge about events and their typical participants (see McRae and Matsuki (2009) for an overview) and claims that words act like cues to access event knowledge, incrementally modulating sentence comprehension. The results obtained in a probe recognition experiment by Zarcone et al. (2014) , in line with this explanation, suggest that speakers interpret logical metonymies by inferring the most likely event the sentences could refer to, given the contextual cues. Previous research in NLP on logical metonymy has often been influenced by such theoretical explanation (Zarcone and Pad\u00f3, 2011; Zarcone et al., 2012; .", |
|
"cite_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 413, |
|
"text": "(Asher, 2015;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 414, |
|
"end": 446, |
|
"text": "Pustejovsky and Batiukova, 2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 631, |
|
"end": 653, |
|
"text": "(McElree et al., 2001;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 654, |
|
"end": 675, |
|
"text": "Traxler et al., 2002)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 783, |
|
"end": 810, |
|
"text": "(Frisson and McElree, 2008)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 813, |
|
"end": 814, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 923, |
|
"end": 935, |
|
"text": "(Elman, 2009", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 936, |
|
"end": 950, |
|
"text": "(Elman, , 2014", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1098, |
|
"end": 1122, |
|
"text": "McRae and Matsuki (2009)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1310, |
|
"end": 1331, |
|
"text": "Zarcone et al. (2014)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 1611, |
|
"end": 1635, |
|
"text": "(Zarcone and Pad\u00f3, 2011;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1636, |
|
"end": 1657, |
|
"text": "Zarcone et al., 2012;", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In our contribution, we propose a general comparison of different classes of computational models for logical metonymy. To begin with, we tested two approaches that have been previously introduced in the literature on the topic: probabilistic and distributional models (Zarcone et al., 2012) . We also examined the Structured Distributional Model (SDM) by Chersoni et al. (2019) , which represents sentence meaning with a combination of formal structures and distributional embeddings to dynamically integrate knowledge about events and their typical participants, as they are activated by lexical items. Finally, to the best of our knowledge, we are the first ones to include the recent Transformer language models into a contrastive study on logical metonymy. Transformers (Vaswani et al., 2017; Devlin et al., 2019) are the dominant class of NLP systems in the last few years, since they are able to generate \"dynamic\" representations for a target word depending on the sentence context. As the interpretation of logical metonymy is highly sensitive to context, we deem that the contextual representations built by Transformers might be able to integrate the covert event that is missing in the surface form of the sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 269, |
|
"end": 291, |
|
"text": "(Zarcone et al., 2012)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 378, |
|
"text": "Chersoni et al. (2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 775, |
|
"end": 797, |
|
"text": "(Vaswani et al., 2017;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 798, |
|
"end": 818, |
|
"text": "Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "All models are evaluated on their capability of assigning the correct interpretation to a metonymic sentence, that is, recovering the verb that refers to the correct interpretation. This task is hard for computational models, as they must exploit contextual cues to distinguish covert events with a high typicality (e.g., The pianist begins the symphony \u2192 playing) from plausible but less typical ones (\u2192 composing).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "According to Zarcone et al. (2013) , the phenomenon of logical metonymy can be explained in terms of the thematic fit, that is, the degree of compatibility between the verb and one of its arguments (the direct object, in this case). On the one hand, a low thematic fit between an event-selecting verb and an entity-denoting argument triggers the recovery of a covert event, while on the other hand, the recovered event is often the best fitting one, given the information available in the sentence. Research in NLP on logical metonymy initially focused on the problem of covert event retrieval, which was tackled by means of probabilistic models (Lapata and Lascarides, 2003; Shutova, 2009) , or by using Distributional Semantic Models (DSMs) that identify the candidate covert event with the one that has the highest thematic fit with the arguments in the sentence (Zarcone et al., 2012) . Following the psycholinguistic works by McElree et al. (2001) and Traxler et al. (2002) , which reported increased reading times and longer fixations in eye-tracking for the metonymic sentences, Zarcone et al. (2013) proposed a distributional model of the thematic fit between verb and object, and showed that it accurately reproduces the differences between the experimental conditions in the data from the two original studies. A general distributional model for sentence com-prehension was used by to simultaneously tackle both these two aspects of logical metonymy (covert event retrieval and increased processing times), although at the cost of a highly-elaborated compositional model. The authors recently introduced a more up-to-date and refined version of their sentence comprehension model (Chersoni et al., 2019) , but it has not been tested on the logical metonymy task so far.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 34, |
|
"text": "Zarcone et al. (2013)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 675, |
|
"text": "(Lapata and Lascarides, 2003;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 690, |
|
"text": "Shutova, 2009)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 866, |
|
"end": 888, |
|
"text": "(Zarcone et al., 2012)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 931, |
|
"end": 956, |
|
"text": "McElree et al. (2001) and", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 957, |
|
"end": 978, |
|
"text": "Traxler et al. (2002)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1086, |
|
"end": 1107, |
|
"text": "Zarcone et al. (2013)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 1690, |
|
"end": 1713, |
|
"text": "(Chersoni et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computational Models of Logical Metonymy", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The traditional approach in Distributional Semantics has been the building of a single, stable vector representation for each word type in the corpus (Turney and Pantel, 2010; Lenci, 2018) . Lately, a new generation of embeddings has emerged, in which each occurrence of a word in a specific sentence context gets a unique representation (Peters et al., 2018) . The most recent systems typically rely on an LSTM or a Transformer architecture for getting word representations: they are trained on large amounts of textual data and the word vectors are learned as a function of the internal states of the encoder, such that a word in different sentence contexts determines different activation states and is represented by a different vector. Thus, embeddings generated by these new models are said to be contextualized, as opposed to the static vectors generated by the earlier frameworks, and they aim at modeling the specific sense assumed by the word in context. One of the most popular and successful contextualized model is probably BERT (Devlin et al., 2019) , whose key technical innovation is applying the bidirectional training of Transformer, a popular attention model, to language modelling. This is in contrast to previous efforts which looked at a text sequence either from left to right or combined left-to-right and right-toleft training. The results of the paper show that a language model with bidirectional training can have a deeper sense of language context and structure than single-direction language models. An interesting aspect of Transformer models like BERT is that they are trained via masked language modeling, that is, they have to retrieve a word that has been masked in a given input sentence. Since interpreting logical metonymy implies the retrieval of an event that is not overtly expressed and that humans retrieve integrating the lexical cues in the sentence, these models are potentially a very good fit for this task. To draw an analogy, we could imagine that the covert event is a verb that has been 'masked' in the linguistic input and that we ask BERT-like models to make a guess.", |
|
"cite_spans": [ |
|
{ |
|
"start": 150, |
|
"end": 175, |
|
"text": "(Turney and Pantel, 2010;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 188, |
|
"text": "Lenci, 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 359, |
|
"text": "(Peters et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1042, |
|
"end": 1063, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer Models in NLP", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "It is important to point out that not all Transformers are used for masked language modeling: among those tested for this study, BERT and RoBERTa are directly trained with this objective, XLNet is trained with permutation language modeling, but can still retrieve a hidden word given a bidirectional context, and GPT-2 works similarly to a traditional, unidirectional language model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer Models in NLP", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Our research question focuses on how computational models can interpret metonymic sentences. To explore this issue, we define the task of logical metonymy interpretation as a covert event recovery task. More specifically, given a sentence like The architect finished the house, the computational model has to return the most likely hidden verb for the sentence, i.e. the covert event representing its interpretation. Despite the architectural differences, all tested models compute a plausibility score of a verb as expressing the covert event associated with a <subject, metonymic verb, object> triple. We evaluate the scores returned by a model against human judgments using the standard measures of accuracy and correlation depending if the dataset contains categorical or continuous variables.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task", |
|
"sec_num": "3.1" |
|
}, |
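|
{ |
|
"text": "As an illustration, the two evaluation settings just described can be sketched as follows, assuming a generic score(tuple, verb) function exposed by the model under test (the function and variable names are illustrative, not our actual evaluation code):\n\nfrom scipy.stats import spearmanr\n\ndef pairwise_accuracy(pairs, score):\n    # pairs: list of (tuple, typical_verb, atypical_verb)\n    # a pair counts as correct if the typical verb receives the higher score\n    correct = sum(score(t, typ) > score(t, atyp) for t, typ, atyp in pairs)\n    return correct / len(pairs)\n\ndef rating_correlation(items, score):\n    # items: list of (tuple, candidate_verb, human_rating)\n    model_scores = [score(t, v) for t, v, _ in items]\n    human_ratings = [r for _, _, r in items]\n    rho, p_value = spearmanr(model_scores, human_ratings)\n    return rho, p_value", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task", |
|
"sec_num": "3.1" |
|
}, |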
|
{ |
|
"text": "In our experiments, we use three datasets designed for previous psycholinguistic studies, and a newly created one by means of an elicitation task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The McElree dataset (MC) comprises the stimuli from the sentences of the self-paced reading experiment of McElree et al. (2001) and includes 30 pairs of tuples. Each pair has the same subject, metonymic verb, object, just the covert verb varies. As in the conditions of the original experiment, the hidden verb could be either highly plausible, or plausible but less typical, given the subject and the object of the tuple. The Traxler dataset (TR) results from the sentences of the eye-tracking experiment of Traxler et al. (2002) and includes 36 pairs of tuples. The format is the same as the McElree dataset. On these two datasets, the models have to perform a binary classification task, with the goal of assigning a higher score to the covert event in the typical condition.", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 127, |
|
"text": "McElree et al. (2001)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 509, |
|
"end": 530, |
|
"text": "Traxler et al. (2002)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The Lapata-Lascarides dataset (L&L) (Lapata and Lascarides, 2003) includes 174 tuples, each composed by a metonymic verb, an object and a potential covert verb. The authors collected plausibility ratings for each metonymy by turning the tuples into sentences and used the Magnitude Estimation Paradigm (Stevens, 1957) to ask human subjects to rate the plausibility of the interpretation of the metonymic verb. Finally, the mean ratings have been normalized and log-transformed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 65, |
|
"text": "(Lapata and Lascarides, 2003)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 317, |
|
"text": "(Stevens, 1957)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "A further dataset of recovered covert events (CE) was collected by the authors. The metonymic sentences used in the McElree and Traxler experiments were turned into 69 templates with an empty slot corresponding to the covert event (e.g., The student began the book late in the semester). Thirty subjects recruited with crowdsourcing were asked to produce two verbs that provided the most likely fillers for the event slot. Out of the 4, 084 collected verbs, we selected those with a production frequency \u2265 3 for a given stimulus. The final dataset comprises 285 items each consisting of a subject -metonymic verb -object tuple t and a covert event e associated with a salience score corresponding to the event conditional probability given the tuple P (e|t) (i.e., the production frequency of e normalized by the total events produced for t). In the case of the latter two datasets, for each model we compute the Spearman's correlation between the probabilities generated by the model and the human judgements. Examples from these datasets are provided in Table 1. While collecting the data for CE, we also run a statistical comparison between the production frequencies of the verbs in the typical and in the atypical condition that appear in the binary classification datasets, to ensure that humans genuinely agree on the higher typicality of the former. The result confirmed this assumption: according to the Wilcoxon signed rank test with continuity correction, the frequencies of production of the typical verbs for the MC dataset were significantly higher (W = 424, p < 0.001), and the same holds for the typical verbs in the TR dataset (W = 526.5, p < 0.001).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1056, |
|
"end": 1064, |
|
"text": "Table 1.", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.2" |
|
}, |
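|
{ |
|
"text": "As a minimal sketch of how the salience scores and the typicality comparison described above can be computed (the data structures and names are illustrative, not our collection scripts):\n\nfrom collections import Counter\nfrom scipy.stats import wilcoxon\n\ndef salience_scores(productions, min_freq=3):\n    # productions: dict mapping each <subject, metonymic verb, object> tuple\n    # to the list of verbs produced by the participants for that stimulus\n    scores = {}\n    for t, verbs in productions.items():\n        counts = Counter(verbs)\n        total = sum(counts.values())\n        # P(e|t): production frequency normalized by the total productions for t\n        scores[t] = {e: f / total for e, f in counts.items() if f >= min_freq}\n    return scores\n\n# Paired comparison of production frequencies across stimuli, where\n# typical_freqs[i] and atypical_freqs[i] refer to the same stimulus:\n# stat, p = wilcoxon(typical_freqs, atypical_freqs, correction=True)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.2" |
|
}, |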
|
{ |
|
"text": "In the following section, we describe the general aspects of the computational models that we tested on logical metonymy interpretation. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "As a baseline model, we adopt the simple probabilistic approach proposed by Lapata and Lascarides (2003) and replicated by Zarcone et al. (2012) as the SO p model, which was reported as the best performing probabilistic model on the task. The interpretation of a logical metonymy (e.g., The pianist began the symphony) is modelled as the joint distribution P(s, v, o, e) of the variables s (the subject, pianist), v (the metonymic verb, began), o (the object, symphony), and the covert event e (e.g., play). We compute that probability considering the metonymic verb constant:", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 104, |
|
"text": "Lapata and Lascarides (2003)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 144, |
|
"text": "Zarcone et al. (2012)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Model", |
|
"sec_num": "3.3.1" |
|
}, |
|
{ |
|
"text": "P(s, v, o, e) \u2248 P(e) P(o|e) P(s|e)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Model", |
|
"sec_num": "3.3.1" |
|
}, |
|
{ |
|
"text": "The verb E representing the preferred interpretation of the metonymy is the verb e maximizing the following equation: E = argmax e P (e)P (o|e)P (s|e)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Model", |
|
"sec_num": "3.3.1" |
|
}, |
|
{ |
|
"text": "We computed the statistics from a 2018 dump of the English Wikipedia, parsed with the Stanford CoreNLP toolkit (Manning et al., 2014 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 132, |
|
"text": "(Manning et al., 2014", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Model", |
|
"sec_num": "3.3.1" |
|
}, |
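|
{ |
|
"text": "A minimal sketch of this scoring scheme, assuming the corpus counts have already been extracted (the count tables and names below are illustrative):\n\ndef best_covert_event(subject, obj, candidates, count_e, count_se, count_oe):\n    # count_e[e]: corpus frequency of verb e\n    # count_se[(s, e)]: frequency of e occurring with s as its subject\n    # count_oe[(o, e)]: frequency of e occurring with o as its object\n    total = sum(count_e.values())\n    def score(e):\n        if count_e.get(e, 0) == 0:\n            return 0.0\n        p_e = count_e[e] / total\n        p_s_given_e = count_se.get((subject, e), 0) / count_e[e]\n        p_o_given_e = count_oe.get((obj, e), 0) / count_e[e]\n        return p_e * p_o_given_e * p_s_given_e\n    return max(candidates, key=score)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Model", |
|
"sec_num": "3.3.1" |
|
}, |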
|
{ |
|
"text": "Distributional models of logical metonymy assume that the event recovery task can be seen as a thematic fit task: recovering the covert event means identifying the verb with the highest thematic fit with the metonymic sentence. We reimplement the distributional model by Zarcone et al. (2012) with the following procedure:", |
|
"cite_spans": [ |
|
{ |
|
"start": 271, |
|
"end": 292, |
|
"text": "Zarcone et al. (2012)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "\u2022 we retrieve the n (= 500) 2 most strongly associated verbs for the subject and the object respectively, and we take the intersection of the two lists;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "\u2022 we update their association scores using either the sum (add) or the product (prod) function;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "\u2022 we select the embeddings corresponding to the first m (= 20) verbs in this list and we add them together to create the prototype vector of the verb given the subject and the object;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "\u2022 the thematic fit of the covert event e with respect to the nominal entities is computed as the similarity score of its corresponding lexical vector e with the prototype vector. As we did the probabilistic model, we discard the metonymic verb from this computation. 3 We test two variations of this model, TF-add and TF-prod, which differ for the filler selection update function. Statistics were extracted from Wikipedia 2018, and the vectors were the publiclyavailable Wikipedia embeddings 4 trained with the FastText model (Bojanowski et al., 2017) . The verb-filler association score is the Local Mutual Information (Evert, 2008) . Similarly, the scores for the subject fillers are defined as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 267, |
|
"end": 268, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 527, |
|
"end": 552, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 621, |
|
"end": 634, |
|
"text": "(Evert, 2008)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "LM I(s, e) = f (e sbj \u2190 \u2212 \u2212 s)log 2 p(s|e) p(s)p(e)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "where s is the subject, e the covert event, and f (e sbj \u2190 \u2212 \u2212 s) indicates the frequency of e with the subject. The scores for the object position are computed with the following formula:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "LM I(o, e) = f (e obj \u2190 \u2212 \u2212 o)log 2 p(o|e) p(o)p(e)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "where o is the object and f (e obj \u2190 \u2212 \u2212 o) represents the joint frequency of e with the object.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |
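|
{ |
|
"text": "The procedure above can be sketched as follows (a simplified illustration: the LMI association table and the FastText vectors are assumed to be precomputed, and the names are not those of our actual implementation):\n\nimport numpy as np\n\ndef thematic_fit(subject, obj, candidate, vectors, assoc, n=500, m=20, mode=\"add\"):\n    # assoc[(word, role)]: dict mapping verbs to their LMI association with word in that role\n    subj_verbs = dict(sorted(assoc[(subject, \"sbj\")].items(), key=lambda x: -x[1])[:n])\n    obj_verbs = dict(sorted(assoc[(obj, \"obj\")].items(), key=lambda x: -x[1])[:n])\n    shared = set(subj_verbs) & set(obj_verbs)\n    if not shared:\n        return 0.0\n    combine = (lambda a, b: a + b) if mode == \"add\" else (lambda a, b: a * b)\n    scores = {v: combine(subj_verbs[v], obj_verbs[v]) for v in shared}\n    top = sorted(scores, key=scores.get, reverse=True)[:m]\n    prototype = np.sum([vectors[v] for v in top], axis=0)\n    cand = vectors[candidate]\n    return float(np.dot(cand, prototype) / (np.linalg.norm(cand) * np.linalg.norm(prototype)))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Metonymy as Thematic Fit", |
|
"sec_num": "3.3.2" |
|
}, |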
|
{ |
|
"text": "The Structured Distributional Model (SDM) proposed by Chersoni et al. (2019) consists of two components: a Distributional Event Graph (henceforth, DEG), and a meaning composition function. DEG represents event knowledge as a graph automatically built from parsed corpora, where the nodes are words associated to a numeric vector, and the edges are labeled with syntactic relations and weighted using statistic association measures. Each event is represented as a path in DEG, that is, a sequence of edges (relations) which joins a sequence of vertices (words). Thus, given a lexical cue w, it is possible to identify the associated events and to generate expectations about incoming inputs on both the paradigmatic and the syntagmatic axis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 76, |
|
"text": "Chersoni et al. (2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Structured Distributional Model", |
|
"sec_num": "3.3.3" |
|
}, |
|
{ |
|
"text": "The composition function makes use of two semantic structures (inspired by DRT (Kamp, 2013) ): the linguistic condition (LC), a contextindependent tier of meaning, and the active context (AC), which accumulates contextual information available during sentence processing or activated by lexical items. The crucial aspect is that the model associates a vectorial representation to these formal structures: LC is the sum of the embeddings of the lexical items of a sentence; AC, for each syntactic slot, is represented as the centroid vector built out of the role vectors r 1 , ..., r n available in AC, i.e. the syntactic associates of the lexical items that have been already processed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 91, |
|
"text": "(Kamp, 2013)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Structured Distributional Model", |
|
"sec_num": "3.3.3" |
|
}, |
|
{ |
|
"text": "In our implementation of SDM, the DEG is constructed by extracting syntactic relations from the same dump of Wikipedia adopted in the previous models, and we chose as lexical embeddings the same FastText Wikipedia vectors. Following the same assumption of the previous experiment, we model the covert event recovery task as a thematic fit task: the goal is to predict the hidden verb on the basis of the subject and the object, treating the metonymic verb as a constant. Specifically, the model builds a semantic representation for each tuple in the dataset. The linguistic condition vector LC contains the sum of the subject and object embeddings. At the same time, the event knowledge vector AC contains the prototypical embedding for the main verb, using DEG to retrieve the most associated verbs for the subject and the object, as in Chersoni et al. (2019) . The scoring function has been adapted to the event recovery task as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 838, |
|
"end": 860, |
|
"text": "Chersoni et al. (2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Structured Distributional Model", |
|
"sec_num": "3.3.3" |
|
}, |
|
{ |
|
"text": "cos( e, LC(sent)) + cos( e, AC(sent))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Structured Distributional Model", |
|
"sec_num": "3.3.3" |
|
}, |
|
{ |
|
"text": "where sent refers to the metonymic test tuple. In other words, we quantify the typicality of a verb for a tuple subject-object as the sum of i.) the cosine similarity between the event embedding and the additive combination of the other argument vectors ( LC) and ii.) the cosine similarity between the event embedding and the prototype vector representing the active context ( AC).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Structured Distributional Model", |
|
"sec_num": "3.3.3" |
|
}, |
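|
{ |
|
"text": "A minimal sketch of this adapted scoring function (it assumes the same FastText vectors and a prototype verb vector built from DEG as in the thematic fit sketch above; all names are illustrative):\n\nimport numpy as np\n\ndef cosine(a, b):\n    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))\n\ndef sdm_score(subject, obj, candidate, vectors, ac_prototype):\n    # LC: sum of the subject and object embeddings\n    lc = vectors[subject] + vectors[obj]\n    # AC: prototype vector of the verbs most associated with subject and object in DEG\n    e = vectors[candidate]\n    return cosine(e, lc) + cosine(e, ac_prototype)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Structured Distributional Model", |
|
"sec_num": "3.3.3" |
|
}, |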
|
{ |
|
"text": "We experiment with four Transformer models which have been shown to obtain state-of-the-art performances on several NLP benchmarks. The popular BERT model (Devlin et al., 2019) was the first to adopt the bidirectional training of Transformer for a language modeling task. To make this kind of training possible, BERT introduced a masked language modeling objective function: random words in the input sentences are replaced by a [MASK] token and the model attempts to predict the masked token based on the surrounding context. Simultaneously, BERT is optimized on a next sentence prediction task, as the model receives sentence pairs in input and has to predict whether the second sentence is subsequent to the first one in the training data. 5 BERT has been trained on a concatenation of the BookCorpus and the English Wikipedia, for a total of 3300M tokens ca. In our experiments, we used the larger pre-trained version, called BERT-large-cased.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-based Models", |
|
"sec_num": "3.3.4" |
|
}, |
|
{ |
|
"text": "RoBERTa (Liu et al., 2019) has the same architecture as BERT, but it introduces several parameter optimization choices: it makes use of dynamic masking (compared to the static masking of the original model), of a larger batch-size and a larger vocabulary size. Moreover, the input consists of complete sentences randomly extracted from one or multiple documents, and the next sentence prediction objective is removed. Besides the optimized design choice, another key difference of RoBERTa with the other models is the larger training corpus, which consists of a concatenation of the Book-Corpus, CCNEWS, OpenWebText, and STORIES. With a total 160GB of text, RoBERTa has access to more potential knowledge than the other models. For our tests, we used the large pre-trained model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 26, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-based Models", |
|
"sec_num": "3.3.4" |
|
}, |
|
{ |
|
"text": "XLNet (Yang et al., 2019 ) is a generalized autoregressive (AR) pretraining method which uses the context words to predict the next word. The AR architecture is constrained to a single direction (either forward or backwards), that is, context representation takes in consideration only the tokens to the left or to the right of the i-th position, while BERT representation has access to the contextual information on both sides. To capture bidirectional contexts, XLNet is trained with a permutation method as language modeling objective, where all tokens are predicted but in random order. XLNet's training corpora were the same as BERT plus Giga5, ClueWeb 2012-B and Common Crawl, for a total of 32.89B subword piece. Also in this case, we used the large pre-trained model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 24, |
|
"text": "(Yang et al., 2019", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-based Models", |
|
"sec_num": "3.3.4" |
|
}, |
|
{ |
|
"text": "GPT-2 (Radford et al., 2019) , a variation of GPT, is a uni-directional transformer language model, which means that the training objective is to predict the next word, given all of the previous words. Compared with GPT, GPT-2 optimizes the layer normalization, expands the vocabulary size to 50,257, increases the context size from 512 to 1024 tokens, and optimizes with a larger batch size of 512. In addition, GPT-2 is pre-trained on WebText, which was created by scraping web pages, for a total of 8 million documents of data (40 GB). We 5 Notice that the usefulness of this secondary objective function was questioned, and it was indeed removed in more recent models (Yang et al., 2019; Liu et al., 2019; Joshi et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 28, |
|
"text": "(Radford et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 543, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 672, |
|
"end": 691, |
|
"text": "(Yang et al., 2019;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 692, |
|
"end": 709, |
|
"text": "Liu et al., 2019;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 710, |
|
"end": 729, |
|
"text": "Joshi et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-based Models", |
|
"sec_num": "3.3.4" |
|
}, |
|
{ |
|
"text": "used the XL version of GPT-2 for our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-based Models", |
|
"sec_num": "3.3.4" |
|
}, |
|
{ |
|
"text": "The parameters of the Transformer models are reported in Table 3 . BERT, RoBERTa and XLNet are used to perform a word prediction task: given a sentence and a masked word in position k, they compute the probability of a word w k given the context k : P (w i |context k ). For our experiments, the context is the entire sentence S with the k-th word (the covert event) being replaced by a special token '[MASK]'. Therefore, we turned the test tuples into full sentences, masking the verb as in the example below: The architect finishes [MASK] house. 6 We then compute the probability of a hidden verb to occur in that position, and we expect the preferred verb to get a high value. We performed this task using the packages of the Hap-pyTransformer library. 7", |
|
"cite_spans": [ |
|
{ |
|
"start": 534, |
|
"end": 540, |
|
"text": "[MASK]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 548, |
|
"end": 549, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 64, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Transformer-based Models", |
|
"sec_num": "3.3.4" |
|
}, |
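|
{ |
|
"text": "For illustration, the same masked-verb probability can be obtained with the Hugging Face transformers API (a sketch with bert-large-cased, given as an alternative to the HappyTransformer wrapper we actually used; it assumes the candidate verb is a single wordpiece):\n\nimport torch\nfrom transformers import AutoModelForMaskedLM, AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained(\"bert-large-cased\")\nmodel = AutoModelForMaskedLM.from_pretrained(\"bert-large-cased\")\nmodel.eval()\n\ndef masked_verb_probability(masked_sentence, verb):\n    # masked_sentence, e.g. \"The architect finishes [MASK] the house.\"\n    inputs = tokenizer(masked_sentence, return_tensors=\"pt\")\n    mask_pos = (inputs[\"input_ids\"][0] == tokenizer.mask_token_id).nonzero()[0].item()\n    with torch.no_grad():\n        logits = model(**inputs).logits\n    probs = torch.softmax(logits[0, mask_pos], dim=-1)\n    verb_id = tokenizer.convert_tokens_to_ids(verb)\n    return probs[verb_id].item()", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-based Models", |
|
"sec_num": "3.3.4" |
|
}, |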
|
{ |
|
"text": "As GPT-2 works as a traditional language model, we adopted this model to calculate the probability of the entire sentence (instead of the probability of the hidden verb given the context). In this case, we expect that sentences evoking more typical events get higher values. We adopted the lm-scorer package to compute sentence probabilities. 8 Table 5 and 4 report the final evaluation scores. The performance of the probabilistic model is in line with previous studies, and it outperforms distributional models in some cases, proving that it is indeed a hard baseline to beat. However, accuracy and correlation are computed only on a subgroup of the test items: actually, the model covers about 60% of the datasets' tuples (86.8% for L&L), as we reported in Table 2 . Coverage is the main issue probabilistic models have to face (Zarcone et al., 2013) , while distributional models do not experience such limitation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 831, |
|
"end": 853, |
|
"text": "(Zarcone et al., 2013)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 352, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 760, |
|
"end": 767, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Transformer-based Models", |
|
"sec_num": "3.3.4" |
|
}, |
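|
{ |
|
"text": "Sentence-level scores of this kind can be sketched, for instance, as the sum of token log-probabilities under GPT-2 with the Hugging Face transformers API (an illustrative alternative to the lm-scorer package we actually used):\n\nimport torch\nfrom transformers import GPT2LMHeadModel, GPT2TokenizerFast\n\ntokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2-xl\")\nmodel = GPT2LMHeadModel.from_pretrained(\"gpt2-xl\")\nmodel.eval()\n\ndef sentence_log_probability(sentence):\n    ids = tokenizer(sentence, return_tensors=\"pt\")[\"input_ids\"]\n    with torch.no_grad():\n        # with labels=ids the model returns the mean cross-entropy over the predicted tokens\n        loss = model(ids, labels=ids).loss\n    # total natural-log probability of the sentence\n    return -loss.item() * (ids.size(1) - 1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transformer-based Models", |
|
"sec_num": "3.3.4" |
|
}, |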
|
{ |
|
"text": "Regarding the thematic fit models, we observe that there is no difference between the TF-add and TF-prod models, as they obtain similar scores. However, we need to point out that, when the system computes the intersection of the two lists of the top verbs for subjects and objects, sometimes the number of retrieved items is less than 20 (the model parameter for the verb embedding selection, cf. Section 3.3.2). Therefore, independently of the selected function, the verbs used to compute the prototypical vector are eventually all those belonging to the intersection. Moreover, TF-models are often close to, and never significantly outperform the probabilistic baseline. Among the distributional models, SDM is the one that obtains a considerable performance across all the datasets. This model performs close to RoBERTa both in the Traxler and in the CE dataset. This result is surprising, considering that SDM is trained just on a dump of Wikipedia, while RoBERTa is trained on 160 GB of text and implements advanced deep learning techniques. This outcome confirms that SDM, which has been designed to represent event knowledge and the dynamic construction of sentence meaning, is able to adequately model the typicality of events. This aspect has been suggested to be one of the core components of the language processing system (Baggio and Hagoort, 2011; Baggio et al., 2012; Chersoni et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1334, |
|
"end": 1360, |
|
"text": "(Baggio and Hagoort, 2011;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1361, |
|
"end": 1381, |
|
"text": "Baggio et al., 2012;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1382, |
|
"end": 1404, |
|
"text": "Chersoni et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "On the other hand, Transformers also provided interesting results. RoBERTa achieves the best score for the L&L dataset, reaching a statistical significance of the improvement over SDM (p < 0.01). 9 More importantly, it is the only Transformer that consistently obtains good results across all datasets, while the scores from other Transformer models are highly fluctuating. We believe that the gigantic size of the training corpus is a factor that positively affects its performance. At the same time, GPT-2 achieves the highest score for MC dataset (0.87) (but the improvement over RoBERTa and SDM does not reach statistical significance), although it performs significantly lower on the other benchmarks 10 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "4" |
|
}, |
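|
{ |
|
"text": "The significance tests used here and detailed in the footnotes can be sketched as follows (an illustration, not our analysis scripts: r1 and r2 are the correlations of two models over n items, and b and c are the counts of items that only one of the two models classifies correctly):\n\nimport math\nfrom scipy.stats import chi2, norm\n\ndef fisher_r_to_z(r1, r2, n):\n    # one-tailed comparison of two correlation coefficients via Fisher's r-to-z transformation\n    z1, z2 = math.atanh(r1), math.atanh(r2)\n    z = (z1 - z2) / math.sqrt(2.0 / (n - 3))\n    return 1.0 - norm.cdf(z)\n\ndef mcnemar_test(b, c):\n    # McNemar's chi-square test with continuity correction on the 2x2 contingency matrix\n    stat = (abs(b - c) - 1) ** 2 / (b + c)\n    return chi2.sf(stat, df=1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "4" |
|
}, |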
|
{ |
|
"text": "For the sake of completeness, we also report the overall performance of each model over the two tasks. Results identify RoBERTa and GPT-2 as the best models for the correlation and classification tasks, respectively. However, we wonder if the average score is a valid measure to identify the best model. These two models tend to have a wavering behavior, which results in large differences between the two datasets scores. Specifically, Roberta achieves 0.75 for the L&L dataset, but only 0.39 for the CE one, with 0.36 points of difference. Similarly, GPT-2 reaches 0.89 scores for the MC dataset, but its performance goes down by 0.16. On the contrary, SDM behavior is more stable, with a smaller gap between the two datasets' scores (0.13 point difference for the correlation task and just 0.05 for the accuracy task).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Binary classification task For the MC and TR datasets, we evaluate the models for their capability of assigning a higher probability to the verb in the typical condition. It is important to empha-size that both verbs are plausible in the context, but one describes a more likely event given the subject and the object. This remark is essential, because it explains the performance of all models, distributional and Transformer ones.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To identify which tuples are the most difficult ones, we built a heat map visualizing the correctlypredicted ones in blue, and the wrong ones in yellow (see Figures 1 and 2) . We do not consider the accuracy values obtained by the probabilistic model for its partial coverage. This visualization technique reveals that some pairs are never predicted correctly, corresponding to the fully vertical yellow lines in the figures. In what follow we report the tuples that are consistently mistaken for MC (1) and TR (2) datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 173, |
|
"text": "Figures 1 and 2)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(1) a. The teenager starts the novel.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "b. The worker begins the memo.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(2) a. The editor finishes the newspaper. b. The director starts the script. c. The teenager begins the novel.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In all the above cases, a model must discriminate between the verb read (HIGH TYP) and write (LOW TYP). 11 It is interesting to notice that, for many of the read-write pairs in the binary classification data, the production frequencies of typical and atypical verb are much closer than on average, suggesting that the interpretation requires understanding of subtle nuances of context-sensitive typicality, which might not be trivial even for humans.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Furthermore, in Figure 2 we observe that for two TR's pairs, SDM is the only one picking the right choice: The stylist starts the braid and The auditor begins the taxes. It seems that models regularly tend to prefer a verb with a more generic and undetermined meaning (make and do, respectively), while only SDM correctly assigns the HIGH TYP class to the verbs that indicate more precisely the manner of doing something (braid and audit).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 24, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "On the other hand, GPT-2 and RoBERTa managed to pick the right choice for a few of the readwrite items on which SDM is mistaken.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Correlation task Correlation is a more complex task compared to classification, as the lower scores also reveal. To better understand our results, we select the best model for the CE (i.e., SDM) and L&L (i.e., RoBERTa) datasets, and we plot the linear relationship between the human ratings and the model-derived probabilities. 12 For CE, Figure 3 reveals 1) a small positive correlation between the two variables, 2) a large amount of variance, and 3) a few outliers.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 339, |
|
"end": 348, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "As for L&L in Figure 4 , the majority of the points follow a roughly linear relationship, and there is a small variation around the trend. Nevertheless, this result could be influenced by the form of the input sentences. For all the other datasets, we masked the token between the verb and the object, and the corresponding hidden verb had to be in the progressive form (The chef starts [cooking] dinner). For L&L, instead, we chose to insert the preposition to after the verb since lots of the metonymic verbs (want, try, etc.) require to be followed by the infinitive verb. Thus, the context gives a higher probability to verbs as masked tokens, while different parts of speech could be equally plausible for the other conditions. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 22, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In this paper, we have presented a comparative evaluation of several computational models on the task of logical metonymy interpretation. We frame this problem as the retrieval of an event that is not overtly expressed in the surface form of the sentence. According to Elman's Words-as-Cues framework, human subjects can infer the covert event in logical metonymy thanks to the generalized knowledge about events and participants stored in their semantic memory. Hence, during sentence processing, words in the sentence create a network of mutual expectations that triggers the retrieval of typical events associated with lexical items and gen-erates expectations about the upcoming words (Elman, 2014). To tackle the task of logical metonymy interpretation, computational models must be able to recover unexpressed relationships between the words, using a context-sensitive representation of meaning that captures this event knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The most compelling outcome of the reported experiments is probably the performance of SDM, which achieves the best score for the TR and the CE datasets. These results demonstrate the significance of encoding event structures outside the embeddings (which are treated as nodes in a distributional graph), and the ability of the SDM compositional function to dynamically update the semantic representation for a sentence. However, the evaluation scores are not very high, especially in the correlation task. Results reveal that the contextualized information used by computational models is useful to recall plausible events connected to the arguments, but this is still not sufficient. Even Transformer models, which currently report state-of-theart performances on several NLP benchmarks, are not performing significantly better than the SDM model, which is trained on a smaller corpus and without any advanced deep learning technique. Error analysis highlights that they are able to identify the plausible scenarios in which the participants could occur, but they still struggle in perceiving different nuances of typicality. Our experiments show how the logical metonymy task can be seen as a testing ground to check whether computational models encode common-sense event knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Future work might follow two directions. On the one hand, expanding the coverage of the graph could favourably increase the performance of SDM. On the other hand, Transformer models could be tested with new experimental settings, such as the fine-tuning of the pre-trained weights on thematic fit-related (Lenci, 2011; Sayeed et al., 2016; Santus et al., 2017) or semantic role classification tasks (Collobert et al., 2011; Zapirain et al., 2013; Roth and Lapata, 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 305, |
|
"end": 318, |
|
"text": "(Lenci, 2011;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 339, |
|
"text": "Sayeed et al., 2016;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 360, |
|
"text": "Santus et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 423, |
|
"text": "(Collobert et al., 2011;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 446, |
|
"text": "Zapirain et al., 2013;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 469, |
|
"text": "Roth and Lapata, 2015)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Notice however that the evidence is not uncontroversial:Delogu et al. (2017) report that coercion costs largely reflect word surprisal, without any specific effect of type shift in the early processing measures.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We set a high value for this parameter in order to maximize the coverage.3Zarcone et al. (2012) show that, for both the probabilistic and the distributional model, including the metonymic verb does not help too much in terms of performance and leads to coverage issues.4 https://fasttext.cc/docs/en/ english-vectors.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "One of the anonymous reviewers argues that the performance of the Transformer-based models might be influenced by the prompt sentence and suggest more variations of the input sentences. We indeed tested several manipulations of the inputs before feeding them to the transformers, changing 1) the tense of the metonymic verb (using the past tense) and 2) the number of the direct object (we used the plurals of the dataset nouns). However, the results did not show any consistent trend.7 https://github.com/EricFillion/ happy-transformer 8 https://pypi.org/project/lm-scorer/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The p-value is computed with Fisher's r-to-z transformation, one-tailed test.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We determine the significance of differences between models for MC and TR datasets with a McNemar's Chi-Square Test, applied to a 2x2 contingency matrix containing the number of correct and incorrect answers (replicating the approach ofZarcone et al. (2012)).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Except for the sentence in 2.a, where the typical verb is edit.12 We apply the logarithmic transformation of data for visualization purposes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work, carried out within the Institut Convergence ILCB (ANR-16-CONV-0002), has benefited from support from the French government, managed by the French National Agency for Research (ANR) and the Excellence Initiative of Aix-Marseille University (A*MIDEX). We thank the anonymous reviewers for their insightful feedback.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "6" |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Types, Meanings and Coercions in Lexical Semantics", |
|
"authors": [ |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Asher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Lingua", |
|
"volume": "157", |
|
"issue": "", |
|
"pages": "66--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicholas Asher. 2015. Types, Meanings and Coercions in Lexical Semantics. Lingua, 157:66-82.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The Balance between Memory and Unification in Semantics: A Dynamic Account of the N400. Language and Cognitive Processes", |
|
"authors": [ |
|
{ |
|
"first": "Giosu\u00e8", |
|
"middle": [], |
|
"last": "Baggio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Hagoort", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "26", |
|
"issue": "", |
|
"pages": "1338--1367", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Giosu\u00e8 Baggio and Peter Hagoort. 2011. The Balance between Memory and Unification in Semantics: A Dynamic Account of the N400. Language and Cog- nitive Processes, 26(9):1338-1367.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The Processing Consequences of Compositionality. The Oxford Handbook of Compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Giosu\u00e8", |
|
"middle": [], |
|
"last": "Baggio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Van Lambalgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hagoort", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "657--674", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Giosu\u00e8 Baggio, Michiel Van Lambalgen, and Peter Ha- goort. 2012. The Processing Consequences of Com- positionality. The Oxford Handbook of Composi- tionality. Oxford, pages 657-674.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Enriching Word Vectors with Subword Information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching Word Vectors with Subword Information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Logical Metonymy in a Distributional Model of Sentence Comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Emmanuele", |
|
"middle": [], |
|
"last": "Chersoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philippe", |
|
"middle": [], |
|
"last": "Blache", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of *SEM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emmanuele Chersoni, Alessandro Lenci, and Philippe Blache. 2017. Logical Metonymy in a Distributional Model of Sentence Comprehension. In Proceedings of *SEM.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A Structured Distributional Model of Sentence Meaning and Processing", |
|
"authors": [ |
|
{ |
|
"first": "Emmanuele", |
|
"middle": [], |
|
"last": "Chersoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enrico", |
|
"middle": [], |
|
"last": "Santus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovica", |
|
"middle": [], |
|
"last": "Pannitto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philippe", |
|
"middle": [], |
|
"last": "Blache", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C-R", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Natural Language Engineering", |
|
"volume": "25", |
|
"issue": "4", |
|
"pages": "483--502", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emmanuele Chersoni, Enrico Santus, Ludovica Pan- nitto, Alessandro Lenci, Philippe Blache, and C-R Huang. 2019. A Structured Distributional Model of Sentence Meaning and Processing. Natural Lan- guage Engineering, 25(4):483-502.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Natural Language Processing (Almost) from Scratch", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e9on", |
|
"middle": [], |
|
"last": "Bottou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Karlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Kuksa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2493--2537", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural Language Processing (Almost) from Scratch. Journal of Machine Learning Research, 12(Aug):2493-2537.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Teasing Apart Coercion and Surprisal: Evidence from Eye-Movements and ERPs", |
|
"authors": [ |
|
{ |
|
"first": "Francesca", |
|
"middle": [], |
|
"last": "Delogu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew W", |
|
"middle": [], |
|
"last": "Crocker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heiner", |
|
"middle": [], |
|
"last": "Drenhaus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Cognition", |
|
"volume": "161", |
|
"issue": "", |
|
"pages": "46--59", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francesca Delogu, Matthew W Crocker, and Heiner Drenhaus. 2017. Teasing Apart Coercion and Sur- prisal: Evidence from Eye-Movements and ERPs. Cognition, 161:46-59.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL-HLT 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. In Proceedings of NAACL-HLT 2019, Minneapolis, MN.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "On the Meaning of Words and Dinosaur Bones: Lexical Knowledge without a Lexicon", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jeffrey L Elman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Cognitive Science", |
|
"volume": "33", |
|
"issue": "4", |
|
"pages": "547--582", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey L Elman. 2009. On the Meaning of Words and Dinosaur Bones: Lexical Knowledge without a Lex- icon. Cognitive Science, 33(4):547-582.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "The Architecture of Cognition: Rethinking Fodor and Pylyshyn's Systematicity Challenge", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jeffrey L Elman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey L Elman. 2014. Systematicity in the Lexicon: On Having your Cake and Eating It Too. In Paco Calvo and John Symons, editors, The Architecture of Cognition: Rethinking Fodor and Pylyshyn's System- aticity Challenge. The MIT Press, Cambridge, MA.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Corpora and collocations. Corpus linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [ |
|
"Evert" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "An international handbook", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "1212--1248", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Evert. 2008. Corpora and collocations. Cor- pus linguistics. An international handbook, 2:1212- 1248.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Complement Coercion is not Modulated by Competition: Evidence from Eye Movements", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Frisson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Mcelree", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Journal of Experimental Psychology: Learning, Memory, and Cognition", |
|
"volume": "34", |
|
"issue": "1", |
|
"pages": "1--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven Frisson and Brian McElree. 2008. Complement Coercion is not Modulated by Competition: Evi- dence from Eye Movements. Journal of Experimen- tal Psychology: Learning, Memory, and Cognition, 34(1):1-11.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Spanbert: Improving Pre-training by Representing and Predicting Spans. Transactions of the Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel S", |
|
"middle": [], |
|
"last": "Weld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "64--77", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S Weld, Luke Zettlemoyer, and Omer Levy. 2020. Spanbert: Improving Pre-training by Representing and Predict- ing Spans. Transactions of the Association for Com- putational Linguistics, 8:64-77.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Meaning and the Dynamics of Interpretation: Selected Papers by Hans Kamp", |
|
"authors": [ |
|
{ |
|
"first": "Hans", |
|
"middle": [], |
|
"last": "Kamp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hans Kamp. 2013. Meaning and the Dynamics of In- terpretation: Selected Papers by Hans Kamp. Brill, Leiden-Boston.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A Probabilistic Account of Logical Metonymy", |
|
"authors": [ |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Lascarides", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational Linguistics", |
|
"volume": "29", |
|
"issue": "2", |
|
"pages": "261--315", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mirella Lapata and Alex Lascarides. 2003. A Proba- bilistic Account of Logical Metonymy. Computa- tional Linguistics, 29(2):261-315.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Composing and Updating Verb Argument Expectations: A Distributional Semantic Model", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the ACL Workshop on Cognitive Modeling and Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Lenci. 2011. Composing and Updating Verb Argument Expectations: A Distributional Se- mantic Model. In Proceedings of the ACL Workshop on Cognitive Modeling and Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Distributional Models of Word Meaning", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Annual Review of Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "151--171", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Lenci. 2018. Distributional Models of Word Meaning. Annual Review of Linguistics, 4:151-171.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Roberta: A Robustly Optimized BERT Pretraining Approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A Robustly Optimized BERT Pretraining Approach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The Stanford CoreNLP natural language processing toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenny", |
|
"middle": [], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mcclosky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of 52nd Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--60", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P14-5010" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven Bethard, and David McClosky. 2014. The Stanford CoreNLP natural language pro- cessing toolkit. In Proceedings of 52nd Annual Meeting of the Association for Computational Lin- guistics: System Demonstrations, pages 55-60, Bal- timore, Maryland. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Reading Time Evidence for Enriched Composition", |
|
"authors": [ |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Mcelree", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew J", |
|
"middle": [], |
|
"last": "Traxler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin J", |
|
"middle": [], |
|
"last": "Pickering", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel E", |
|
"middle": [], |
|
"last": "Seely", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ray", |
|
"middle": [], |
|
"last": "Jackendoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Cognition", |
|
"volume": "78", |
|
"issue": "", |
|
"pages": "17--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brian McElree, Matthew J Traxler, Martin J Pickering, Rachel E Seely, and Ray Jackendoff. 2001. Reading Time Evidence for Enriched Composition. Cogni- tion, 78:B17-B25.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "People Use their Knowledge of Common Events to Understand Language, and Do So as Quickly as Possible", |
|
"authors": [ |
|
{ |
|
"first": "Ken", |
|
"middle": [], |
|
"last": "Mcrae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazunaga", |
|
"middle": [], |
|
"last": "Matsuki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "1417--1429", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ken McRae and Kazunaga Matsuki. 2009. People Use their Knowledge of Common Events to Understand Language, and Do So as Quickly as Possible. Lan- guage and Linguistics Compass, 3(6):1417-1429.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Deep Contextualized Word Representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew E", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep Contextualized Word Rep- resentations. In Proceedings of NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "The Lexicon", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Pustejovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Batiukova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Pustejovsky and Olga Batiukova. 2019. The Lex- icon. Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Language Models Are Unsupervised Multitask Learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Ope-nAI Blog", |
|
"volume": "1", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language Models Are Unsupervised Multitask Learners. Ope- nAI Blog, 1(8):9.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Context-Aware Frame-Semantic Role Labeling", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "449--460", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Roth and Mirella Lapata. 2015. Context- Aware Frame-Semantic Role Labeling. Transac- tions of the Association for Computational Linguis- tics, 3:449-460.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Measuring Thematic Fit with Distributional Feature Overlap", |
|
"authors": [ |
|
{ |
|
"first": "Enrico", |
|
"middle": [], |
|
"last": "Santus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emmanuele", |
|
"middle": [], |
|
"last": "Chersoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philippe", |
|
"middle": [], |
|
"last": "Blache", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Enrico Santus, Emmanuele Chersoni, Alessandro Lenci, and Philippe Blache. 2017. Measuring The- matic Fit with Distributional Feature Overlap. In Proceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Thematic Fit Evaluation: An Aspect of Selectional Preferences", |
|
"authors": [ |
|
{ |
|
"first": "Asad", |
|
"middle": [], |
|
"last": "Sayeed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clayton", |
|
"middle": [], |
|
"last": "Greenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Demberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the ACL Workshop on Evaluating Vector Space Representations for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Asad Sayeed, Clayton Greenberg, and Vera Demberg. 2016. Thematic Fit Evaluation: An Aspect of Se- lectional Preferences. In Proceedings of the ACL Workshop on Evaluating Vector Space Representa- tions for NLP.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Sense-Based Interpretation of Logical Metonymy Using a Statistical Method", |
|
"authors": [ |
|
{ |
|
"first": "Ekaterina", |
|
"middle": [], |
|
"last": "Shutova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the ACL-IJCNLP 2009 Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ekaterina Shutova. 2009. Sense-Based Interpretation of Logical Metonymy Using a Statistical Method. In Proceedings of the ACL-IJCNLP 2009 Student Re- search Workshop, pages 1-9.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "On the Psychophysical Law", |
|
"authors": [ |
|
{ |
|
"first": "Stanley S", |
|
"middle": [], |
|
"last": "Stevens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1957, |
|
"venue": "Psychological review", |
|
"volume": "64", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stanley S Stevens. 1957. On the Psychophysical Law. Psychological review, 64(3):153.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Coercion in Sentence Processing: Evidence from Eye-Movements and Self-Paced Reading", |
|
"authors": [ |
|
{ |
|
"first": "Matthew J", |
|
"middle": [], |
|
"last": "Traxler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin J", |
|
"middle": [], |
|
"last": "Pickering", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "McElree", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Journal of Memory and Language", |
|
"volume": "47", |
|
"issue": "4", |
|
"pages": "530--547", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew J Traxler, Martin J Pickering, and Brian McEl- ree. 2002. Coercion in Sentence Processing: Evi- dence from Eye-Movements and Self-Paced Read- ing. Journal of Memory and Language, 47(4):530- 547.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "From Frequency to Meaning: Vector Space Models of Semantics", |
|
"authors": [ |
|
{ |
|
"first": "Peter D", |
|
"middle": [], |
|
"last": "Turney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Pantel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "141--188", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D Turney and Patrick Pantel. 2010. From Fre- quency to Meaning: Vector Space Models of Se- mantics. Journal of Artificial Intelligence Research, 37:141-188.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Attention Is All You Need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention Is All You Need. In Advances in Neural Information Pro- cessing Systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.08237" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc V Le. 2019. XLNet: Generalized Autoregressive Pretrain- ing for Language Understanding. arXiv preprint arXiv:1906.08237.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Selectional Preferences for Semantic Role Classification", |
|
"authors": [ |
|
{ |
|
"first": "Benat", |
|
"middle": [], |
|
"last": "Zapirain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lluis", |
|
"middle": [], |
|
"last": "Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Linguistics", |
|
"volume": "39", |
|
"issue": "3", |
|
"pages": "631--663", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benat Zapirain, Eneko Agirre, Lluis Marquez, and Mi- hai Surdeanu. 2013. Selectional Preferences for Se- mantic Role Classification. Computational Linguis- tics, 39(3):631-663.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Fitting, not Clashing! A Distributional Semantic Model of Logical Metonymy", |
|
"authors": [ |
|
{ |
|
"first": "Alessandra", |
|
"middle": [], |
|
"last": "Zarcone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Utt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of IWCS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandra Zarcone, Alessandro Lenci, Sebastian Pad\u00f3, and Jason Utt. 2013. Fitting, not Clash- ing! A Distributional Semantic Model of Logical Metonymy. In Proceedings of IWCS.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Generalized Event Knowledge in Logical Metonymy Resolution", |
|
"authors": [ |
|
{ |
|
"first": "Alessandra", |
|
"middle": [], |
|
"last": "Zarcone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of CogSci", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandra Zarcone and Sebastian Pad\u00f3. 2011. Gener- alized Event Knowledge in Logical Metonymy Res- olution. In Proceedings of CogSci.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Logical Metonymy Resolution in a Words-as-Cues Framework: Evidence from Selfpaced Reading and Probe Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Alessandra", |
|
"middle": [], |
|
"last": "Zarcone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Cognitive Science", |
|
"volume": "38", |
|
"issue": "5", |
|
"pages": "973--996", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandra Zarcone, Sebastian Pad\u00f3, and Alessandro Lenci. 2014. Logical Metonymy Resolution in a Words-as-Cues Framework: Evidence from Self- paced Reading and Probe Recognition. Cognitive Science, 38(5):973-996.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Modeling Covert Event Retrieval in Logical Metonymy: Probabilistic and Distributional Accounts", |
|
"authors": [ |
|
{ |
|
"first": "Alessandra", |
|
"middle": [], |
|
"last": "Zarcone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Utt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the NAACL Workshop on Cognitive Modeling and Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandra Zarcone, Jason Utt, and Sebastian Pad\u00f3. 2012. Modeling Covert Event Retrieval in Logi- cal Metonymy: Probabilistic and Distributional Ac- counts. In Proceedings of the NAACL Workshop on Cognitive Modeling and Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Heat map for error analysis over MC dataset." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Heat map for error analysis over TR dataset." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "SDM correlation for CE." |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "RoBERTa correlation for L&L." |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Coverage for the probabilistic model.", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "", |
|
"content": "<table><tr><td>: Comparison between transformer models.</td></tr><tr><td>Model details: L: number of layers, H: dimension of</td></tr><tr><td>hidden states, A: attention head numbers, and P: total</td></tr><tr><td>parameter size.</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Results for binary classification task.", |
|
"content": "<table><tr><td/><td>Probabilistic</td><td/><td>Distributional</td><td/><td/><td colspan=\"2\">Transformer-based</td><td/></tr><tr><td/><td>SOp</td><td colspan=\"3\">TF-add TF-prod SDM</td><td colspan=\"4\">BERT RoBERTa XLNet GPT-2</td></tr><tr><td>L&L</td><td>0.53</td><td>0.41</td><td>0.41</td><td>0.53</td><td>0.61</td><td>0.73</td><td>0.04</td><td>0.43</td></tr><tr><td>CE</td><td>0.36</td><td>0.26</td><td>0.22</td><td>0.40</td><td>0.27</td><td>0.39</td><td>0.18</td><td>0.31</td></tr><tr><td>O. P.</td><td>0.45</td><td>0.34</td><td>0.32</td><td>0.47</td><td>0.44</td><td>0.56</td><td>0.11</td><td>0.37</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Results for correlation task.", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |