|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:13:56.426617Z" |
|
}, |
|
"title": "Fine-grained Event Classification in News-like Text Snippets Shared Task 2, CASE 2021", |
|
"authors": [ |
|
{ |
|
"first": "Jacek", |
|
"middle": [], |
|
"last": "Haneczok", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Erste Digital Vienna", |
|
"location": { |
|
"country": "Austria" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Jacquet", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Joint Research Centre European Commission Ispra", |
|
"institution": "", |
|
"location": { |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Stefanovitch", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Joint Research Centre European Commission Ispra", |
|
"institution": "", |
|
"location": { |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes the Shared Task on Fine-grained Event Classification in News-like Text Snippets. The Shared Task is divided into three subtasks: (a) classification of text snippets reporting socio-political events (25 classes) for which vast amount of training data exists, although exhibiting different structure and style vis-a-vis test data, (b) enhancement to a generalized zero-shot learning problem, where 3 additional event types were introduced in advance, but without any training data ('unseen' classes), and (c) further extension, which introduced 2 additional event types, announced shortly prior to the evaluation phase. The reported Shared Task focuses on classification of events in English texts and is organized as part of the Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021), co-located with the ACL-IJCNLP 2021 Conference. Four teams participated in the task. Best performing systems for the three aforementioned subtasks achieved 83.9%, 79.7% and 77.1% weighted F 1 scores respectively.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes the Shared Task on Fine-grained Event Classification in News-like Text Snippets. The Shared Task is divided into three subtasks: (a) classification of text snippets reporting socio-political events (25 classes) for which vast amount of training data exists, although exhibiting different structure and style vis-a-vis test data, (b) enhancement to a generalized zero-shot learning problem, where 3 additional event types were introduced in advance, but without any training data ('unseen' classes), and (c) further extension, which introduced 2 additional event types, announced shortly prior to the evaluation phase. The reported Shared Task focuses on classification of events in English texts and is organized as part of the Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021), co-located with the ACL-IJCNLP 2021 Conference. Four teams participated in the task. Best performing systems for the three aforementioned subtasks achieved 83.9%, 79.7% and 77.1% weighted F 1 scores respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The task of event classification is to assign to a text snippet an event type using a domain specific taxonomy. It constitutes an important step in the The views expressed in this article are those of the authors and not necessarily those of Erste Digital. process of event extraction from free texts (Appelt, 1999; Piskorski and Yangarber, 2013) which has been researched since mid 90's and gained a lot of attention in the context of development of realworld applications (King and Lowe, 2003; Yangarber et al., 2008; Atkinson et al., 2011; Leetaru and Schrodt, 2013; Ward et al., 2013; Pastor-Galindo et al., 2020) . While vast amount of challenges on automated event extraction, including event classification, has been organised in the past, relatively little efforts have been reported on approaches and shared tasks focusing specifically on fine-grained event classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 301, |
|
"end": 315, |
|
"text": "(Appelt, 1999;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 346, |
|
"text": "Piskorski and Yangarber, 2013)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 495, |
|
"text": "(King and Lowe, 2003;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 519, |
|
"text": "Yangarber et al., 2008;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 520, |
|
"end": 542, |
|
"text": "Atkinson et al., 2011;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 543, |
|
"end": 569, |
|
"text": "Leetaru and Schrodt, 2013;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 570, |
|
"end": 588, |
|
"text": "Ward et al., 2013;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 589, |
|
"end": 617, |
|
"text": "Pastor-Galindo et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper describes the Shared Task on Finegrained Event Classification in News-like Text Snippets. The task is divided into three subtasks: (a) classification of text snippets reporting sociopolitical events (25 classes) for which vast amount of training data exists, although exhibiting slightly different structure and style vis-a-vis test data, (b) enhancement to a generalized zero-shot learning problem (Chao et al., 2016) , where 3 additional event types were introduced in advance, but without any training data ('unseen' classes) , and (c) further extension, which introduced 2 additional event types, announced shortly prior to the evaluation phase. The reported Shared Task focuses on classification of events in English texts and is organized as part of the Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021) , co-located with the ACL-IJCNLP 2021 Conference. Four teams actively participated in the task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 410, |
|
"end": 429, |
|
"text": "(Chao et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 521, |
|
"end": 539, |
|
"text": "('unseen' classes)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main rationale behind organising this Shared Task is not only to foster research on fine-grained event classification, a relatively understudied area, but to specifically explore robust and flexible solutions that are of paramount importance in the context of real-world applications. For instance, often available training data is slightly different from the data on which event classification might be applied (data drift). Furthermore, in real-world scenarios one is interested in quickly tailoring an existing solution to frequent extensions of the underlying event taxonomy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The paper is organized as follows. Section 2 reviews prior work. Section 3 describes the Shared Task in more detail. Section 4 describes the training and test datasets. Next, the evaluation methodology is introduced in Section 5. Baseline and participant systems are described in Section 6. Subsequently, Section 7 presents the results obtained by these systems, whereas Section 8 discusses the main findings of the Shared Task. We present the conclusions in Section 9.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The research on event detection and classification in free-text documents was initially triggered by the Message Understanding Contests (Sundheim, 1991; Chinchor, 1998) and the Automatic Content Extraction Challenges (ACE) (Doddington et al., 2004; LDC, 2008) . The event annotated corpora produced in the context of the aforementioned challenges fostered research on various techniques of event classification, which encompass purely knowledge-based approaches (Stickel and Tyson, 1997) , shallow (Liao and Grishman, 2010; Hong et al., 2011) and deep machine learning approaches (Nguyen and Grishman, 2015; Nguyen et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 152, |
|
"text": "(Sundheim, 1991;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 153, |
|
"end": 168, |
|
"text": "Chinchor, 1998)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 248, |
|
"text": "(Doddington et al., 2004;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 259, |
|
"text": "LDC, 2008)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 462, |
|
"end": 487, |
|
"text": "(Stickel and Tyson, 1997)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 523, |
|
"text": "(Liao and Grishman, 2010;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 524, |
|
"end": 542, |
|
"text": "Hong et al., 2011)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 607, |
|
"text": "(Nguyen and Grishman, 2015;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 628, |
|
"text": "Nguyen et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prior Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Multi-lingual Event Detection and Co-reference challenge was introduced more recently in the Text Analysis Conference (TAC) in 2016 1 and 2017 2 . In particular, it included an Event Nugget Detection subtask, which focused on detection and finegrained classification of intra-document event mentions, covering events from various domains (e.g., finances and jurisdiction).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prior Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "One could observe in the last decade an ever growing interest in research on fine-grained event classification. Lefever and Hoste (2016) compared SVM-based models against word-vectorbased LSTMs for classification of 10 types of company-specific economic events from news texts, whereas Nugent et al. (2017) studied the performance of various models, including ones that exploit word embeddings as features, for detection and classification of natural disaster and crisis events in news articles. Jacobs and Hoste (2020) reports on experiments of exploiting BERT embedding-based models for fine-grained event extraction for the financial domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 136, |
|
"text": "Lefever and Hoste (2016)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 306, |
|
"text": "Nugent et al. (2017)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 519, |
|
"text": "Jacobs and Hoste (2020)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prior Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Although most of the reported work in this area focuses on processing English texts, and in particular, news-like texts as presented in Piskorski et al. (2020) , some efforts on event classification for non-English language were reported too. For instance, Sahoo et al. (2020) introduced a benchmark corpus for fine-grained classification of natural and man-made disasters (28 types) for Hindi, accompanied with evaluation of deep learning baseline models for this task. Furthermore, an example of fine-grained classification of cyberbullying events (7 classes) in social media posts was presented in Van Hee et al. 2015.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 159, |
|
"text": "Piskorski et al. (2020)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prior Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Work on classification of socio-political events and the related shared tasks, although not focusing on fine-grained classification, but covering event types which are in the scope of our task, was presented in and H\u00fcrriyetoglu et al. (2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 241, |
|
"text": "H\u00fcrriyetoglu et al. (2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prior Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The overall objective of this Shared Task is to evaluate the 'flexibility' of fine-grained event classifiers. Firstly, we are interested in the robustness vis-a-vis the input text structure, i.e., how classifiers trained on short texts from a curated database perform on news data taken from diverse sources where this structure is somewhat different. This corresponds to Subtask 1, which can be considered as a regular classification task. Secondly, we wanted to study how classifiers can be made flexible regarding the taxonomy used, with the aim of easily tailoring them for specific needs. This corresponds to Subtask 2 and 3, which were framed as generalized zero-shot learning problems: the label set for Subtask 2 was announced in advance, while the label set for Subtask 3 was announced on the day of the competition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The aforementioned objectives arise from the practical constraints of working with real data, being exposed to data drift and having different users being interested in different facets of the same events.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In order to train a fine-grained event classifier, we proposed to use ACLED (Raleigh et al., 2010) event database and the corresponding taxonomy described in the ACLED Codebook 3 , which has 25 subtypes of events related to socio-political events and violent conflicts. ACLED created a large dataset of events over several years which are manually curated with a common pattern in the way of reporting events and uses a complex event taxonomy: The boundary between the definition of similar classes can be highly intricate, and can seem at points quite arbitrary. Nevertheless, ACLED presented itself as the best possible training material for the specific objectives of this Shared Task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 98, |
|
"text": "(Raleigh et al., 2010)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "More precisely, the formal definitions of the different subtasks are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Subtask 1:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Classification of text snippets that are assigned to ACLED types only,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Subtask 2 (generalized zero-shot):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Classification of text snippets that are assigned to all ACLED types plus three unseen (non-ACLED) types, namely:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Organized Crime, Natural Disaster and Man-made Disaster, these new types were announced in advance, but no training data was provided,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Subtask 3 (generalized zero-shot):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Classification of text snippets that are assigned to two additional unseen event types (Diplomatic Event and Attribution of Responsibility) on top of the ones of Subtask 2, these new types were not announced in advance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The participating teams had the possibility to submit solutions to any number of subtasks without condition, whereas per subtask up to 5 system responses could be submitted for evaluation. More information on the event types for this Shared Task is provided in Appendix A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "For the training purposes the participants were allowed to either exploit any freely available existing event-annotated textual corpora and/or to exploit the short text snippets reporting events which are part of the large event database created by ACLED and which can be obtained from ACLED data portal 4 for research and academic purposes. Furthermore, the participants were also recommended to exploit as an inspiration the techniques for text normalization and cleaning of ACLED data, and some baseline classification models trained using ACLED data described in Piskorski et al. (2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 567, |
|
"end": 590, |
|
"text": "Piskorski et al. (2020)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For the purpose of evaluating the predictive performance of the competing systems a dedicated test set was created based on news-like text snippets. To this end we sourced the web to collect short texts reporting on events either in the form of online news or of a similar style. We posed simple queries with label-specific keywords using conventional search engines to collect relevant text snippets. The most frequent keywords from ACLED datasets have been used a basis to form these queries. The collected set of snippets was cleaned by removing duplicates and further enhanced by adding both manually as well as automatically perturbed short news-like texts. More specifically, for selected snippets the most characteristic keywords were manually replaced by either less common or more vague expressions, so that the event type from the ACLED taxonomy can be still predicted, albeit making it more difficult. Also the reported figures, methods or outcomes of the event were subject to changes. Furthermore, about 15% of the text snippets were automatically perturbed 5 by: (a) replacing all day and month names mentions with another randomly chosen day and month resp., and (b) replacing each occurrence of a toponym referring to a populated place with randomly chosen toponym selected from GEON-AMES gazetteer 6 of about 200K populated cities, whose population is at least 500. The perturbed snippets were additionally inspected in order to make sure that the changes allow for guessing the event type vis-a-vis ACLED taxonomy. Only the perturbed version of the original text snippet were included in the test dataset, the original ones were discarded. An example of original text and the automatically perturbed version thereof is provided in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1749, |
|
"end": 1757, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Test Data", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "A Catalan pro-independence demonstrator throws a fence into a fire during a protest against police action in Barcelona, Spain, October 26, 2019", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Test Data", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "A Madukkarai pro-independence demonstrator throws a fence into a fire during a protest against police action in Podosinovets, Hohenm\u00f6lsen, June 26, 2019 The distribution of the counts by event type is shown in Figure 3 , whereas the distributions of the sequence length by event type is shown in Figure 4 . The created test set consists in total of 1019 text snippets, 190 of which were annotated with labels corresponding to the zero-shot classes. An example of text snippet reporting a Government regains territory event is provided in Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 218, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 304, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 538, |
|
"end": 546, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Test Data", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Syrian government forces have captured a central town and adjacent villages, boosting security in nearby areas loyal to President Bashar Assad, and marched deeper into a rebel-held neighborhood of Damascus, Syrian state media and an opposition monitoring group said Sunday. The annotation was performed by two pairs of independent annotators, cross-validating the annotated snippets. The initial disagreement rate was observed to be roughly 10-15%. Most unclear text snippets, for which there were comparably strong arguments for assigning two or more labels, were removed from the test dataset. For text snippets reporting on multiple events, the more recent event was considered to be the main event (and given the priority for determining the type), whereas the remaining events were considered only as background information. Some ambiguities were solved by aligning on common assumptions, e.g. if there is no explicit mention of violence, a protest reported in the snippet was considered to be a peaceful one. It is important to emphasize that the created test dataset for the Shared Task reported in this paper contains text snippets reporting events, which were prepared solely for the purpose of evaluating solutions for automated fine-grained classification of events reported in short texts. 7", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Test Data", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For measuring the event classification performance we used precision, recall, and the micro, macro and weighted F 1 metric. While the micro version calculates the performance from the classification of individual instances vis-a-vis the all-class model, in macro-averaging, one computes the performance of each individual class separately, and then an average of the obtained scores is computed. The weighted F 1 is similar to the macro version, but computes the average considering the proportion for each class in the dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We provide two baseline systems: a simple character n-gram based L2-regularized logistic regression model and a system based on two Transformerbased deep neural representation models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems 6.1 Baseline Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For Subtask 1 we have trained a L2-regularized Logistic Regression-based model with log-scaled TF-IDF values of 3 to 5 character ngrams found in the text snippets as features 8 (non-optimized, with C = 1.0 and = 0.01) using LIBLINEAR library 9 . In particular, a more balanced subset of ca. 129K event snippets from ACLED-III (Piskorski et al., 2020) was used, i.e., all high-populated classes were under-sampled with a maximum of 10K instances per class.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "L2-regularized Logistic Regression on character n-grams (L2LR baseline )", |
|
"sec_num": "6.1.1" |
|
}, |
|
{ |
|
"text": "As our main baseline model for Subtasks 1-3 we use a combination of two Transformer-based unsupervised language representation models: a multilayer bidirectional Transformer encoder BERT (Devlin et al., 2019) and a sequence-to-sequence autoencoder BART (Lewis et al., 2019) . As a base classifier we employ the BERT-BASE model, pretrained using two unsupervised tasks: masked language model and next sentence prediction on lowerthe text snippets in the test dataset might have a link to some real-world events the information contained in the snippets may contradict factual information. Consequently, this dataset should not be used as a database of events for the analysis of real-world socio-political developments and conflict events. 8 An n-gram is considered as a feature only if it appears at least 15 times in the training data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 273, |
|
"text": "(Lewis et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combined deep Transformers BERT and BART (BB baseline )", |
|
"sec_num": "6.1.2" |
|
}, |
|
{ |
|
"text": "9 https://www.csie.ntu.edu.tw/\u02dccjlin/ liblinear cased English text of the BooksCorpus (800M words) and English Wikipedia (2,500M words) and fine-tuned for supervised classification using ACLED-III data as described in Piskorski et al. (2020) . For Subtasks 2-3 involving a zero-shot learning problem our baseline system relies on the following further steps. The test set observations (text snippets) for which the predicted logits (outputs before the sof tmax normalization) obtained using fine-tuned BERT fall below the threshold l = 7, or for which the predicted label corresponds to the Other class, are passed to the second stage of processing using BART. In the second stage with the objective to tackle the zero-shot learning problem we use BART-LARGE-MNLI, pre-trained on the Multi-Genre Natural Language Inference (MNLI) corpus of 433k sentence pairs annotated with textual entailment information (Williams et al., 2018) . In this stage, the classification task is reformulated as the natural language inference (NLI) task of determining whether a hypothesis is true (entailment) or false (contradiction), given a premise. We follow the approach proposed in Yin et al. (2019) and take the text snippet as the premise and the descriptive forms of candidate labels as alternative hypotheses. The final label is assigned in this stage based on the largest probability of entailment obtained using BART. For each text snippet being processed in this stage the set of candidate labels is defined as consisting of the label predicted in the first stage by the BERT model and all labels of the zero-shot (unseen) classes relevant for the respective subtask.", |
|
"cite_spans": [ |
|
{ |
|
"start": 218, |
|
"end": 241, |
|
"text": "Piskorski et al. (2020)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 906, |
|
"end": 929, |
|
"text": "(Williams et al., 2018)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1167, |
|
"end": 1184, |
|
"text": "Yin et al. (2019)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combined deep Transformers BERT and BART (BB baseline )", |
|
"sec_num": "6.1.2" |
|
}, |
|
{ |
|
"text": "Eight teams registered for the task, whereas four teams submitted their system responses: ICIP (Institute of Software Chinese Academy of Sciences), FKIE-ITF (Fraunhofer Institute for Communication, Information Processing and Ergonomics), IBM-MNLP (IBM Multilingual Natural Language Processing), UNCC (University of North Carolina Charlotte). All participants took part in all 3 subtasks, with the exception of FKIE-ITF which took part only in Subtask 1. We provide short overview of these systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "For Subtask 1 all teams used a fine-tuned ROBERTA as their base classification model. For Subtask 2, most of the teams used a hybrid solution, using a diversity of classifiers, one team did use few shot learning (therefore diverging from the zero shot problem statement). For Subtask 3, where a zero-shot classifier was mandatory, all participants based their system on a Transformer-based model trained on an NLI task, with some variations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Despite using the same base approaches, each team focused in its submission on different ways to improve it: ICIP tried different attention mechanisms; FKIE-ITF (Kent and Krumbiegel, 2021) explored different text pre-processing techniques and used sub-sampling; IBM-MNLP (Barker et al., 2021) tried re-ranking different combination of fewshot, zero-shot and regular classifiers; UNCC (Radford, 2021) focused on using a single NLI learning approach for all tasks and used a specific subsampling.", |
|
"cite_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 188, |
|
"text": "(Kent and Krumbiegel, 2021)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 292, |
|
"text": "(Barker et al., 2021)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "The results for all submitted system responses for all 3 subtasks in terms of precision, recall and F 1 weighted average scores are provided in Table 1 , 2 and 3 respectively, detailed results are given in Appendix B. Each team had the possibility to submit a maximum of 5 configurations per subtask, all of which are reported in the table, and identified by a numerical extension. As an overview of the obtained results, the best performing systems for the three subtasks are 83.9%, 79.7% and 77.1% weighted F 1 scores respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 151, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The two teams that reported using undersampling due to lack of sufficient computational resources, are also the ones having the overall lowest score on Subtask 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In Table 2 , all submissions of team IBM-MNLP are few-shots excepts for their last submission: IBM-MNLP 2.4. Both of their few-shot and zeroshot configurations perform better than systems of any other team for Subtask 2. In Table 3 , their first and third submissions are zero shot for the 5 new types, while their two other submissions are zero-shot only for the 2 new types.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 231, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "For Subtask 3, the best weighted F 1 score for zero-shot classifier restricted to the 5 new classes only are the following: 65.1% for ICIC, 52.9% for IBM-MNLP and 26.2% for UNCC, c.f. Table 7 for details.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 191, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The results of all three subtasks provide interesting insights on fine-grained event classification in the context of real-world applications, where practical constraints can lead to a setup with a drift between the data on which the models were trained and for which predictions are generated, and where unseen classes can naturally pose a zero-shot learning problem. Firstly, we conclude that in Subtask 1 the Transformer-based BERT and ROBERTA were observed to lead to virtually the same level of per-formance in terms of all considered metrics. This observation is interesting, as e.g. on the GLUE benchmark (Wang et al., 2018) ROBERTA is shown to outperform BERT. Secondly, after enhancing the classification task to a generalized zero-shot learning problems in Subtask 2 and 3, the submitted results suggest that the best solutions are, very similar to our baseline BB baseline described in Section 6.1.2, based on the two-stage approach employing a supervised, fine-tuned Transformer-based classifier and another Transformer-based model instance trained on the MNLI data for tackling the zero-shot classification as the sentence-entailment problem. Interestingly, only one team (UNCC) submitted a single-stage model, trained on the entailmentlike reformulation of the classification problem. We hypothesize that compared to the single-stage entailment-like setup, the two-stage approaches might more effectively utilize the information provided in the available training data. The significant differences in performance values between these two paradigms in all three subtasks (73.6% vs. 83.9% in Subtask 1, 63.5% vs. 79.7% in Subtask 2 and 60.5% vs. 77.1% in Subtask 3) might seem to confirm this hypothesis. However, it should be stressed that the submissions following the singlestage entailment-like setup were made with a disclaimer on computational limitations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 612, |
|
"end": 631, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Results", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "In order to provide some flavour of most typical errors and difficulties of automatically labelling event snippets using ACLED taxonomy Figure 5 provides the confusion matrix, normalized over the true conditions (rows), for the BB baseline approach applied to solve Subtask 1.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 144, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall Results", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "The most significant type of error is the misclassification of Force Against Protest as Protest With Interventions (39%), Property Destruction as Mob Violence (29%) and as Violent Demonstration (24%) and Artillery/Missile Attack as Armed Clash (19%). Given a fine line between these types, the above error rates are not surprising. More generally, one can observe that distinguishing between the sub-types belonging to the same main type (see the ACLED taxonomy in Appendix A), is typically more challenging. Also, it is not surprising that the Other class has also a relatively low recall of 50%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Results", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "As regards models robustness, in Piskorski et al. (2020) , the reported F 1 score of the BERT-based ACLED-trained classifier when evaluated on ACLED data yield about 94.4%. In Subtask 1, using similar Transformer-based classifier lead to a maximal score of 83.9%: we observe approx. 10 percentage point drop in performance. It is important to mention herethat the former model used 80% of the ACLED data for training, whereas the latter used the entire ACLED dataset reported in Piskorski et al. (2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 56, |
|
"text": "Piskorski et al. (2020)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 502, |
|
"text": "Piskorski et al. (2020)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Results", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "Class-wise performance comparison of both classifiers are reported in Table 8 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 77, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall Results", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "Such a performance drop can be explained in part by the fact that text snippets in the ACLED follow a pattern that is different than news-like reporting, and as such the classifier struggles to generalize to the real-world news-like reporting style, despite the standard regularization techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Results", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "The performance drop is not equally distributed over the classes. Actually, when applying to news data, roughly half of the classes have better scores, and half have worse scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Results", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "One possible reason for this performance drop seems to be the three most populated classes in the ACLED dataset (Armed Clash, Attack, Artillery/Missile Attack) which on average lost 18 points when compared with the results of the baseline model BB baseline .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall Results", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "Having used ACLED taxonomy in the context of this Shared Task have resulted in some reflections, both in terms of experience of using it to annotate text snippets reporting events and its practicality for a real-world application for automatically labelling news-like texts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ACLED taxonomy", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "As regards the annotation of news-like text snippets great care has been taken to follow strictly the ACLED Codebook. This turned to be a harder task than initially expected, in part due to shortcomings of the Codebook, and, in part due to the nature of how events are reported in the news.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ACLED taxonomy", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "News texts often assume a known global context and do not provide enough information to allow to clearly assign an ACLED event subtype. This is due the high specificity of ACLED subtypes that make it hard, for instance, to classify a text describing a demonstration, if it can not be understood from the text whether the event was violent, and if this was the case, which side started the violence, i.e., the demonstrators or the authority tasked to thwart the demonstration. All such information is needed to select the proper ACLED event class. Having said this, it is worthwhile to mention here that sometimes the nuances between the definitions of the event types are very small and we also found certain inconsistencies between the entries in the ACLED event database itself, e.g. for the Protest with Intervention and Excessive force against the protesters categories the corresponding text descriptions did not differ much, and at times using certain instrument to intervene was mentioned in the case of both events. Clearly, when encoding an event using ACLED taxonomy based on HUMINT and without considering any source text the human knows the event type upfront, and hence, the resulting text describing the event might not fully reflect/mirror the specific of the particular event type. This poses a certain limitation to what extent the textual descriptions of events in ACLED can be useful for training models to be applied on news-like data, but to have a better picture a full-fledged study of the aforementioned inconsistencies should be carried out, which is out of scope of the Shared Task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ACLED taxonomy", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "The high specificity of the ACLED taxonomy is also at times problematic as it was not designed for multi-label classification tasks. As such, an attack on a civilian with a suicide bomber can not be classified as suicide bombing event according to ACLED taxonomy if any other interaction took place and is reported, for instance, if the text mentions also assailants attack with firearms first before detonating the bomb or if the police tries to stop them. In such a case the Armed Clash event type has to be used. On the other hand, intuitively, it would make sense that the text is tagged with at least two labels: Attack (attack on civilian) and Suicide bombing, or potentially also a tag that represents an authority intervention. ACLED taxonomy imposes a complex and incomplete set of priorities in order to enforce an event to be labelled using a mono-dimensional classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ACLED taxonomy", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "Another issue encountered when using this taxonomy is related to the fact that definitions of some event classes are unclear and not intuitive per-se. For instance, the class Arrest which accounts for either mass arrests or arrest of VIPs, but not for arrests of \"one or few\" people, which fall under a different type. Furthermore, problematic is also the fact that some classes are actually determined not only by what actually happened but also by who was the main actor involved. For instance, the class Government retakes territory and Non-state actor captures territory are almost indistinguishable when the named entities are shuffled. What is more, the taxonomy does not specify how to handle certain cases, e.g., when a non-government actor is acting on behalf of or is supported by the government in regaining/overtaking territory.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ACLED taxonomy", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "Lastly, disregarding the strictly monodimensional nature of ACLED taxonomy, most news text snippets (even single sentences) report on more than one event, and determining which one is the salient one is not always straightforward even to human annotators. One of our observations is that for labelling news reporting on events a multi-class labelling approach would be more intuitive and logical.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ACLED taxonomy", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "This paper reported on the outcome of the Shared Task on Fine-grained Event Classification in Newslike Text Snippets that has been organized as part of the Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021), co-located with the ACL-IJCNLP 2021 Conference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "8 teams registered to participate in the task, while 4 of them submitted system responses for 3 subtasks, two of which were generalized zeroshot learning tasks. Given the specific set up of the shared task, i.e., the training data being somewhat different from the test data and inclusion of 5 unseen classes the top results obtained can be considered good, however, there is definitely place for improvement. Furthermore, we intend to carry out comparative error analysis across systems, which might reveal some additional insights into the complexity of the task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "Further documentation and related material on the reported Shared Task can be found at https://github.com/emerging-welfare/ case-2021-shared-task/tree/main/task2, whereas the test dataset alone is also available at:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "http://piskorski.waw.pl/resources/ case2021/data.zip for research purposes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "We believe that the reported results, findings and the annotated test dataset will contribute to stimulating further research on fine-grained event classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "The ACLED event taxonomy comprises of six main event types which are further subdivided into 25 sub-event types as follows: For further details on ACLED event taxonomy please refer to the ACLED codebook.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Event Types", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We provide here the description of the 5 new types used in the Shared Task. The first three new types cover contextually important security-and safety-related events and developments that are not related to political violence and not considered to contribute to political dynamics within and across multiple states. The last two new types cover events directly related to security situation, and as such fall under the Strategic Development main event type of ACLED, however, they are mainly related to announcements instead of concrete deeds. The 5 additional new types are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Event Types", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Organized crime: This event type covers incidents related to activities of criminal groups, excluding conflict between such groups: smuggling, human trafficking, counterfeit products, property crime, cyber crime, assassination (for criminal purposes), corruption, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Event Types", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Natural Disaster: This event type covers any kind of natural disasters and hazards where there is a direct or potential harm, including: earthquakes, tsunami, floods, storms, fires, volcano eruptions, landslides, avalanches, infectious disease outbreaks, pandemics, climate related, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Event Types", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Man-made Disaster: This event type covers any kind of disasters caused by humans where there is a direct or potential harm, such as: industrial accidents, traffic incidents, infrastructure failure, foodchain contamination, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Event Types", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Diplomatic Event: This event type covers any kind of diplomatic action or announcement that have a potential impact on the security situation or denoting the attitude of a country towards a conflict. As such this type covers diplomatic measures declaration (e.g. sanctions or closure of embassies), threats, call for actions, praises and condemnations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Event Types", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Attribution of Responsibility: This event type covers announcements related to the responsibility of attacks and hostile operations. In particular, this event type covers group claiming their own responsibility, accusation of responsibility and denial of responsibility.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Event Types", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://tac.nist.gov//2016/KBP/Event/ index.html 2 https://tac.nist.gov/2017/KBP/Event/ index.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://acleddata.com/acleddatanew/ wpcontent/uploads/dlm_uploads/2019/01/ ACLED_Codebook_2019FINAL.docx.pdf", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://acleddata.com/ data-export-tool 5 The choice of 15% was motivated by the willingness to add some (but not too much) additional complexity to the task.6 https://www.geonames.org/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Disclaimer: A significant fraction of the text snippets in the test dataset has no link to any real-world event whatsoever and, in particular, the locations mentioned therein were selected completely at random. As such, even though some of", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Introduction to information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Douglas", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Appelt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "AI Commun", |
|
"volume": "12", |
|
"issue": "3", |
|
"pages": "161--172", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douglas E. Appelt. 1999. Introduction to information extraction. AI Commun., 12(3):161-172.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Multilingual Real-Time Event Extraction for Border Security Intelligence Gathering", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Atkinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Yangarber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Van Der Goot", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Open Source Intelligence and Counter-terrorism", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Atkinson, Jakub Piskorski, Roman Yangarber, and Erik van der Goot. 2011. Multilingual Real- Time Event Extraction for Border Security Intelli- gence Gathering. In Open Source Intelligence and Counter-terrorism. Springer, LNCS, Vol. 2.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "IBM MNLP IE at CASE 2021 Task 2: NLI Reranking for Zero-Shot Text Classification", |
|
"authors": [ |
|
{ |
|
"first": "Ken", |
|
"middle": [], |
|
"last": "Barker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parul", |
|
"middle": [], |
|
"last": "Awasthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Ni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Florian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 4 th Workshop on Challenges and Applications of Automated Extraction of Sociopolitical Events from Text (CASE 2021)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ken Barker, Parul Awasthy, Jian Ni, and Radu Flo- rian. 2021. IBM MNLP IE at CASE 2021 Task 2: NLI Reranking for Zero-Shot Text Classification. In Proceedings of the 4 th Workshop on Challenges and Applications of Automated Extraction of Socio- political Events from Text (CASE 2021). Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "An empirical study and analysis of generalized zero-shot learning for object recognition in the wild", |
|
"authors": [ |
|
{ |
|
"first": "Wei-Lun", |
|
"middle": [], |
|
"last": "Chao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soravit", |
|
"middle": [], |
|
"last": "Changpinyo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Boqing", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Sha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "European conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "52--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei-Lun Chao, Soravit Changpinyo, Boqing Gong, and Fei Sha. 2016. An empirical study and analysis of generalized zero-shot learning for object recogni- tion in the wild. In European conference on com- puter vision, pages 52-68. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Overview of MUC-7", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Nancy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Chinchor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of a Conference Held in Fairfax, Virginia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nancy A. Chinchor. 1998. Overview of MUC-7. In Seventh Message Understanding Conference (MUC- 7): Proceedings of a Conference Held in Fairfax, Vir- ginia, April 29 -May 1, 1998.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The Automatic Content Extraction (ACE) Program -Tasks, Data, and Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Doddington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Przybocki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lance", |
|
"middle": [], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Strassel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the Fourth International Conference on Language Resources and Evaluation (LREC'04)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Doddington, Alexis Mitchell, Mark Przybocki, Lance Ramshaw, Stephanie Strassel, and Ralph Weischedel. 2004. The Automatic Content Extrac- tion (ACE) Program -Tasks, Data, and Evalua- tion. In Proceedings of the Fourth International Conference on Language Resources and Evaluation (LREC'04), Lisbon, Portugal. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Using Cross-Entity Inference to Improve Event Extraction", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Hong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianmin", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiaoming", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1127--1136", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Hong, Jianfeng Zhang, Bin Ma, Jianmin Yao, Guodong Zhou, and Qiaoming Zhu. 2011. Using Cross-Entity Inference to Improve Event Extraction. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 1127-1136, Portland, Oregon, USA. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021): Workshop and Shared Task Report", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "H\u00fcrriyetoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hristo", |
|
"middle": [], |
|
"last": "Tanev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vanni", |
|
"middle": [], |
|
"last": "Zavarella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reyyan", |
|
"middle": [], |
|
"last": "Yeniterzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erdem", |
|
"middle": [], |
|
"last": "Y\u00f6r\u00fck", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 4 th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021). Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali H\u00fcrriyetoglu, Hristo Tanev, Vanni Zavarella, Jakub Piskorski, Reyyan Yeniterzi, and Erdem Y\u00f6r\u00fck. 2021. Challenges and Applications of Auto- mated Extraction of Socio-political Events from Text (CASE 2021): Workshop and Shared Task Re- port. In Proceedings of the 4 th Workshop on Chal- lenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021). Asso- ciation for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Overview of CLEF 2019 lab protestnews: Extracting protests from news in a cross-context setting", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "H\u00fcrriyetoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erdem", |
|
"middle": [], |
|
"last": "Y\u00f6r\u00fck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Burak", |
|
"middle": [], |
|
"last": "Agri Yoltar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Firat", |
|
"middle": [], |
|
"last": "G\u00fcrel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Osman", |
|
"middle": [], |
|
"last": "Durusan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arda", |
|
"middle": [], |
|
"last": "Mutlu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Akdemir", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Experimental IR Meets Multilinguality, Multimodality, and Interaction -10th International Conference of the CLEF Association, CLEF 2019", |
|
"volume": "11696", |
|
"issue": "", |
|
"pages": "425--432", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-28577-7_32" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali H\u00fcrriyetoglu, Erdem Y\u00f6r\u00fck, Deniz Yuret, \u00c7 agri Yoltar, Burak G\u00fcrel, Firat Durusan, Osman Mutlu, and Arda Akdemir. 2019. Overview of CLEF 2019 lab protestnews: Extracting protests from news in a cross-context setting. In Experimental IR Meets Multilinguality, Multimodality, and Interaction - 10th International Conference of the CLEF Associ- ation, CLEF 2019, Lugano, Switzerland, Septem- ber 9-12, 2019, Proceedings, volume 11696 of Lec- ture Notes in Computer Science, pages 425-432. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Cross-Context News Corpus for Protest Event-Related Knowledge Base Construction", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "H\u00fcrriyetoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erdem", |
|
"middle": [], |
|
"last": "Y\u00f6r\u00fck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Osman", |
|
"middle": [], |
|
"last": "Mutlu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F\u0131rat", |
|
"middle": [], |
|
"last": "Duru\u015fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Agr\u0131 Yoltar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Burak", |
|
"middle": [], |
|
"last": "Y\u00fcret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "G\u00fcrel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Data Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali H\u00fcrriyetoglu, Erdem Y\u00f6r\u00fck, Osman Mutlu, F\u0131rat Duru\u015fan, \u00c7 agr\u0131 Yoltar, Deniz Y\u00fcret, and Burak G\u00fcrel. 2021. Cross-Context News Corpus for Protest Event-Related Knowledge Base Construc- tion. Data Intelligence, pages 1-28.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Extracting fine-grained economic events from business news", |
|
"authors": [ |
|
{ |
|
"first": "Gilles", |
|
"middle": [], |
|
"last": "Jacobs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Joint Workshop on Financial Narrative Processing and MultiLing Financial Summarisation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "235--245", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gilles Jacobs and Veronique Hoste. 2020. Extracting fine-grained economic events from business news. In Proceedings of the 1st Joint Workshop on Finan- cial Narrative Processing and MultiLing Financial Summarisation, pages 235-245, Barcelona, Spain (Online). COLING.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "CASE 2021 Task 2: Socio-political Fine-grained Event Classification using Fine-tuned RoBERTa Document Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Samantha", |
|
"middle": [], |
|
"last": "Kent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Theresa", |
|
"middle": [], |
|
"last": "Krumbiegel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 4 th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021). Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samantha Kent and Theresa Krumbiegel. 2021. CASE 2021 Task 2: Socio-political Fine-grained Event Classification using Fine-tuned RoBERTa Docu- ment Embeddings. In Proceedings of the 4 th Workshop on Challenges and Applications of Auto- mated Extraction of Socio-political Events from Text (CASE 2021). Association for Computational Lin- guistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "An Automated Information Extraction Tool For International Conflict Data with Performance as Good as Human Coders", |
|
"authors": [ |
|
{ |
|
"first": "Gary", |
|
"middle": [], |
|
"last": "King", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Lowe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "International Organization", |
|
"volume": "57", |
|
"issue": "", |
|
"pages": "617--642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gary King and Will Lowe. 2003. An Automated In- formation Extraction Tool For International Conflict Data with Performance as Good as Human Coders. International Organization, 57:617-642.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Annotation Tasks and Specification", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ldc", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "LDC. 2008. Annotation Tasks and Specifica- tion. ONLINE: https://www.ldc.upenn.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "edu/collaborations/past-projects/ace/ annotation-tasks-and-specifications", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "edu/collaborations/past-projects/ace/ annotation-tasks-and-specifications.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Gdelt: Global data on events, location, and tone", |
|
"authors": [ |
|
{ |
|
"first": "Kalev", |
|
"middle": [], |
|
"last": "Leetaru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Schrodt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ISA Annual Convention", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kalev Leetaru and Philip A Schrodt. 2013. Gdelt: Global data on events, location, and tone, 1979- 2012. In ISA Annual Convention, volume 2.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A Classification-based Approach to Economic Event Detection in Dutch News Text", |
|
"authors": [ |
|
{ |
|
"first": "Els", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00e9ronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "330--335", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Els Lefever and V\u00e9ronique Hoste. 2016. A Classification-based Approach to Economic Event Detection in Dutch News Text. In Proceedings of the Tenth International Conference on Language Re- sources and Evaluation (LREC'16), pages 330-335, Portoro\u017e, Slovenia. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelrahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ves", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.13461" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. 2019. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Using Document Level Cross-Event Inference to Improve Event Extraction", |
|
"authors": [ |
|
{ |
|
"first": "Shasha", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "789--797", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shasha Liao and Ralph Grishman. 2010. Using Docu- ment Level Cross-Event Inference to Improve Event Extraction. In Proceedings of the 48th Annual Meet- ing of the Association for Computational Linguistics, pages 789-797, Uppsala, Sweden. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Joint Learning of Local and Global Features for Entity Linking via Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "Thien", |
|
"middle": [ |
|
"Huu" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Fauceglia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariano", |
|
"middle": [ |
|
"Rodriguez" |
|
], |
|
"last": "Muro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oktie", |
|
"middle": [], |
|
"last": "Hassanzadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alfio", |
|
"middle": [ |
|
"Massimiliano" |
|
], |
|
"last": "Gliozzo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Sadoghi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2310--2320", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen, Nicolas Fauceglia, Mariano Ro- driguez Muro, Oktie Hassanzadeh, Alfio Massimil- iano Gliozzo, and Mohammad Sadoghi. 2016. Joint Learning of Local and Global Features for Entity Linking via Neural Networks. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 2310-2320, Osaka, Japan. The COLING 2016 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Event Detection and Domain Adaptation with Convolutional Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "Thien", |
|
"middle": [ |
|
"Huu" |
|
], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "365--371", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thien Huu Nguyen and Ralph Grishman. 2015. Event Detection and Domain Adaptation with Convolu- tional Neural Networks. In Proceedings of the 53rd Annual Meeting of the Association for Computa- tional Linguistics and the 7th International Joint Conference on Natural Language Processing (Vol- ume 2: Short Papers), pages 365-371, Beijing, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "A comparison of classification models for natural disaster and critical event detection from news", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Nugent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Petroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natraj", |
|
"middle": [], |
|
"last": "Raman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucas", |
|
"middle": [], |
|
"last": "Carstens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jochen", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Leidner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE International Conference on Big Data", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3750--3759", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Nugent, Fabio Petroni, Natraj Raman, Lucas Carstens, and Jochen L. Leidner. 2017. A compari- son of classification models for natural disaster and critical event detection from news. In 2017 IEEE In- ternational Conference on Big Data, BigData 2017, Boston, MA, USA, December 11-14, 2017, pages 3750-3759.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "The Not Yet Exploited Goldmine of OSINT: Opportunities, Open Challenges and Future Trends", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pastor-Galindo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Nespoli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"G\u00f3mez" |
|
], |
|
"last": "M\u00e1rmol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Mart\u00ednez P\u00e9rez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Access", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "10282--10304", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Pastor-Galindo, P. Nespoli, F. G\u00f3mez M\u00e1rmol, and G. Mart\u00ednez P\u00e9rez. 2020. The Not Yet Exploited Goldmine of OSINT: Opportunities, Open Chal- lenges and Future Trends. IEEE Access, 8:10282- 10304.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "New benchmark corpus and models for fine-grained event classification: To BERT or not to BERT?", |
|
"authors": [ |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacek", |
|
"middle": [], |
|
"last": "Haneczok", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Jacquet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6663--6678", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jakub Piskorski, Jacek Haneczok, and Guillaume Jacquet. 2020. New benchmark corpus and mod- els for fine-grained event classification: To BERT or not to BERT? In Proceedings of the 28th Inter- national Conference on Computational Linguistics, pages 6663-6678, Barcelona, Spain (Online). Inter- national Committee on Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Information extraction: Past, present and future", |
|
"authors": [ |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Yangarber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Multi-source, Multilingual Information Extraction and Summarization, Theory and Applications of Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "23--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jakub Piskorski and Roman Yangarber. 2013. Informa- tion extraction: Past, present and future. In Thierry Poibeau, Horacio Saggion, Jakub Piskorski, and Ro- man Yangarber, editors, Multi-source, Multilingual Information Extraction and Summarization, Theory and Applications of Natural Language Processing, pages 23-49. Springer Berlin Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Zero-Shot Classification of Fine-Grained Sociopolitical Events with Transformer Models", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 4 th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021)", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Radford. 2021. CASE 2021 Task 2: Zero- Shot Classification of Fine-Grained Sociopolitical Events with Transformer Models. In Proceedings of the 4 th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE 2021). Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Introducing ACLED: An Armed Conflict Location and Event Dataset: Special Data Feature", |
|
"authors": [ |
|
{ |
|
"first": "Clionadh", |
|
"middle": [], |
|
"last": "Raleigh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Linke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H\u00e5vard", |
|
"middle": [], |
|
"last": "Hegre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Karlsen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Journal of Peace Research", |
|
"volume": "47", |
|
"issue": "5", |
|
"pages": "651--660", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Clionadh Raleigh, Andrew Linke, H\u00e5vard Hegre, and Joakim Karlsen. 2010. Introducing ACLED: An Armed Conflict Location and Event Dataset: Spe- cial Data Feature. Journal of Peace Research, 47(5):651-660.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A Platform for Event Extraction in Hindi", |
|
"authors": [ |
|
{ |
|
"first": "Sovan", |
|
"middle": [ |
|
"Kumar" |
|
], |
|
"last": "Sahoo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saumajit", |
|
"middle": [], |
|
"last": "Saha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asif", |
|
"middle": [], |
|
"last": "Ekbal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2241--2250", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sovan Kumar Sahoo, Saumajit Saha, Asif Ekbal, and Pushpak Bhattacharyya. 2020. A Platform for Event Extraction in Hindi. In Proceedings of The 12th Lan- guage Resources and Evaluation Conference, pages 2241-2250, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Fastus: A cascaded finite-state transducer for extracting information from natural-language text", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Stickel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mabry", |
|
"middle": [], |
|
"last": "Tyson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Finite-State Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "383--406", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Stickel and Mabry Tyson. 1997. Fastus: A cas- caded finite-state transducer for extracting informa- tion from natural-language text. In Finite-State Lan- guage Processing, pages 383-406. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Overview of the Third Message Understanding Evaluation and Conference", |
|
"authors": [ |
|
{ |
|
"first": "Beth", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Sundheim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Third Message Uunderstanding Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Beth M. Sundheim. 1991. Overview of the Third Mes- sage Understanding Evaluation and Conference. In Third Message Uunderstanding Conference (MUC- 3): Proceedings of a Conference Held in San Diego, California, May 21-23, 1991.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Detection and Fine-Grained Classification of Cyberbullying Events", |
|
"authors": [ |
|
{ |
|
"first": "Cynthia", |
|
"middle": [], |
|
"last": "Van Hee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Els", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Verhoeven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Mennes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Desmet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guy", |
|
"middle": [], |
|
"last": "De Pauw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "672--680", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cynthia Van Hee, Els Lefever, Ben Verhoeven, Julie Mennes, Bart Desmet, Guy De Pauw, Walter Daele- mans, and Veronique Hoste. 2015. Detection and Fine-Grained Classification of Cyberbullying Events. In Proceedings of the International Confer- ence Recent Advances in Natural Language Process- ing, pages 672-680, Hissar, Bulgaria. INCOMA Ltd. Shoumen, BULGARIA.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel R", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.07461" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Comparing GDELT and ICEWS event data", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Beger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Cutler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Dickenson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cassy", |
|
"middle": [], |
|
"last": "Dorff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Analysis", |
|
"volume": "21", |
|
"issue": "", |
|
"pages": "267--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael D Ward, Andreas Beger, Josh Cutler, Matt Dickenson, Cassy Dorff, and Ben Radford. 2013. Comparing GDELT and ICEWS event data. Anal- ysis, 21:267-297.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "A broad-coverage challenge corpus for sentence understanding through inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1112--1122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Content Collection and Analysis in the Domain of Epidemiology", |
|
"authors": [ |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Yangarber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"Von" |
|
], |
|
"last": "Etter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of DrMED 2008: International Workshop on Describing Medical Web Resources at MIE 2008: the 21 st International Congress of the European Federation for Medical Informatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roman Yangarber, Peter Von Etter, and Ralf Stein- berger. 2008. Content Collection and Analysis in the Domain of Epidemiology. In Proceedings of DrMED 2008: International Workshop on Describ- ing Medical Web Resources at MIE 2008: the 21 st International Congress of the European Federation for Medical Informatics 2008, Goeteborg, Sweden.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Benchmarking zero-shot text classification: Datasets, evaluation and entailment approach", |
|
"authors": [ |
|
{ |
|
"first": "Wenpeng", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamaal", |
|
"middle": [], |
|
"last": "Hay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.00161" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenpeng Yin, Jamaal Hay, and Dan Roth. 2019. Benchmarking zero-shot text classification: Datasets, evaluation and entailment approach. arXiv preprint arXiv:1909.00161.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Performance overview Subtask 3: weighted average scores on the 5 unknown types. Figure 5: Confusion matrix for BB baseline applied to Subtask 1", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Table 7: Performance overview Subtask 3: weighted average scores on the 5 unknown types. Figure 5: Confusion matrix for BB baseline applied to Subtask 1.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Sample text snippet reporting a violent demonstration event (top) and the perturbed version thereof (bottom)." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Sample text snippet reporting an event." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Event type count distribution in the test dataset." |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Distribution of the length of the text snippets by event type in the test dataset." |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"5\">: Overall performance overview Subtask 1:</td></tr><tr><td colspan=\"2\">weighted average scores.</td><td/><td/></tr><tr><td>System</td><td colspan=\"2\">Sys. type Prec.</td><td>Rec.</td><td>F1</td></tr><tr><td>BB baseline</td><td>Zero-S.</td><td colspan=\"3\">0.811 0.787 0.788</td></tr><tr><td>IBM-MNLP 2.1</td><td>Few-S.</td><td colspan=\"3\">0.824 0.782 0.779</td></tr><tr><td>IBM-MNLP 2.2</td><td>Few-S.</td><td colspan=\"3\">0.817 0.797 0.797</td></tr><tr><td>IBM-MNLP 2.3</td><td>Few-S.</td><td colspan=\"3\">0.824 0.794 0.790</td></tr><tr><td>IBM-MNLP 2.4</td><td>Zero-S.</td><td colspan=\"3\">0.809 0.786 0.785</td></tr><tr><td>ICIP 2.1</td><td>Zero-S.</td><td colspan=\"3\">0.798 0.744 0.742</td></tr><tr><td>ICIP 2.2</td><td>Zero-S.</td><td colspan=\"3\">0.823 0.781 0.776</td></tr><tr><td>ICIP 2.3</td><td>Zero-S.</td><td colspan=\"3\">0.820 0.775 0.769</td></tr><tr><td>ICIP 2.4</td><td>Zero-S.</td><td colspan=\"3\">0.827 0.781 0.779</td></tr><tr><td>ICIP 2.5</td><td>Zero-S.</td><td colspan=\"3\">0.829 0.784 0.782</td></tr><tr><td>UNCC 2.1</td><td>Zero-S.</td><td colspan=\"3\">0.670 0.658 0.635</td></tr><tr><td>UNCC 2.2</td><td>Zero-S.</td><td colspan=\"3\">0.670 0.658 0.635</td></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Overall performance overview Subtask 2: weighted average scores.", |
|
"html": null, |
|
"content": "<table><tr><td>System</td><td colspan=\"2\">Sys. type Prec.</td><td>Rec.</td><td>F1</td></tr><tr><td>BB baseline</td><td>Zero-S.</td><td colspan=\"3\">0.803 0.745 0.753</td></tr><tr><td>IBM-MNLP 3.1</td><td>Zero-S.</td><td colspan=\"3\">0.793 0.744 0.746</td></tr><tr><td>IBM-MNLP 3.2</td><td>Few-S.</td><td colspan=\"3\">0.787 0.755 0.756</td></tr><tr><td>IBM-MNLP 3.3</td><td>Zero-S.</td><td colspan=\"3\">0.793 0.744 0.746</td></tr><tr><td>IBM-MNLP 3.4</td><td>Few-S.</td><td colspan=\"3\">0.787 0.755 0.756</td></tr><tr><td>ICIP 3.1</td><td>Zero-S.</td><td colspan=\"3\">0.790 0.741 0.733</td></tr><tr><td>ICIP 3.2</td><td>Zero-S.</td><td colspan=\"3\">0.818 0.775 0.765</td></tr><tr><td>ICIP 3.3</td><td>Zero-S.</td><td colspan=\"3\">0.810 0.768 0.757</td></tr><tr><td>ICIP 3.4</td><td>Zero-S.</td><td colspan=\"3\">0.818 0.775 0.767</td></tr><tr><td>ICIP 3.5</td><td>Zero-S.</td><td colspan=\"3\">0.821 0.778 0.771</td></tr><tr><td>UNCC 3.1</td><td>Zero-S.</td><td colspan=\"3\">0.643 0.625 0.602</td></tr><tr><td>UNCC 3.2</td><td>Zero-S.</td><td colspan=\"3\">0.644 0.629 0.605</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Overall performance overview Subtask 3: weighted average scores.", |
|
"html": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |