|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:10:54.539047Z" |
|
}, |
|
"title": "Slav-NER: the 3 rd Cross-lingual Challenge on Recognition, Normalization, Classification, and Linking of Named Entities across Slavic languages", |
|
"authors": [ |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Polish Academy of Sciences", |
|
"location": { |
|
"settlement": "Warsaw", |
|
"country": "Poland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Bogdan", |
|
"middle": [], |
|
"last": "Babych", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Heidelberg University", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zara", |
|
"middle": [], |
|
"last": "Kancheva", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Kanishcheva", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Kharkiv Polytechnic Institute", |
|
"location": { |
|
"country": "Ukraine" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Lebedeva", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Micha\u0142", |
|
"middle": [], |
|
"last": "Marci\u0144czuk", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Wroc\u0142aw University of Science and Technology", |
|
"location": { |
|
"country": "Poland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "HBKU", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Petya", |
|
"middle": [], |
|
"last": "Osenova", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Lidia", |
|
"middle": [], |
|
"last": "Pivovarova", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Helsinki", |
|
"location": { |
|
"country": "Finland" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Senja", |
|
"middle": [], |
|
"last": "Pollak", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Jozef Stefan Institute", |
|
"location": { |
|
"country": "Slovenia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "P\u0159ib\u00e1\u0148", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of West Bohemia", |
|
"location": { |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ivaylo", |
|
"middle": [], |
|
"last": "Radev", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Marko", |
|
"middle": [], |
|
"last": "Robnik-\u0160ikonja", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Ljubljana", |
|
"location": { |
|
"country": "Slovenia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Vasyl", |
|
"middle": [], |
|
"last": "Starko", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Ukrainian Catholic University", |
|
"location": { |
|
"settlement": "Lviv", |
|
"country": "Ukraine" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of West Bohemia", |
|
"location": { |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Yangarber", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Helsinki", |
|
"location": { |
|
"country": "Finland" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes Slav-NER: the 3 rd Multilingual Named Entity Challenge in Slavic languages. The tasks involve recognizing mentions of named entities in Web documents, normalization of the names, and crosslingual linking. The Challenge covers six languages and five entity types, and is organized as part of the 8 th Balto-Slavic Natural Language Processing Workshop, co-located with the EACL 2021 Conference. Ten teams participated in the competition. Performance for the named entity recognition task reached 90% Fmeasure, much higher than reported in the first edition of the Challenge. Seven teams covered all six languages. Detailed evaluation information is available on the shared task web page.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes Slav-NER: the 3 rd Multilingual Named Entity Challenge in Slavic languages. The tasks involve recognizing mentions of named entities in Web documents, normalization of the names, and crosslingual linking. The Challenge covers six languages and five entity types, and is organized as part of the 8 th Balto-Slavic Natural Language Processing Workshop, co-located with the EACL 2021 Conference. Ten teams participated in the competition. Performance for the named entity recognition task reached 90% Fmeasure, much higher than reported in the first edition of the Challenge. Seven teams covered all six languages. Detailed evaluation information is available on the shared task web page.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Analyzing named entities (NEs) in Slavic languages poses a challenging problem, due to the rich inflection and derivation, free word order, and other morphological and syntactic phenomena exhibited in these languages (Przepi\u00f3rkowski, 2007; Piskorski et al., 2009) . Encouraging research on detection and normalization of NEs-and on the closely related problem of cross-lingual, crossdocument entity linking-is of paramount importance for improving multilingual and cross-lingual information access in these languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 239, |
|
"text": "(Przepi\u00f3rkowski, 2007;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 263, |
|
"text": "Piskorski et al., 2009)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper describes the 3 rd Shared Task on multilingual NE recognition (NER), which aims at addressing these problems in a systematic way.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The shared task was organized in the context of the 8 th BSNLP: Balto-Slavic Natural Language Processing Workshop, co-located with the EACL 2021 conference. The task covers six languages-Bulgarian, Czech, Polish, Russian, Slovene and Ukrainian-and five types of NE: person, location, organization, product, and event. The input text collection consists of documents collected from the Web, each collection centered on a certain \"focal\" event. The rationale of such a setup is to foster the development of \"end-to-end\" NER and cross-lingual entity linking solutions, which are not tailored to specific, narrow domains. This paper also serves as an introduction and a guide for researchers wishing to explore these problems using the training and test data, which are released to the public. 1 The paper is organized as follows. Section 2 reviews prior work. Section 3 describes the task; Section 4 describes the annotation of the dataset. The evaluation methodology is introduced in Section 5. Participant systems are described in Section 6, and the results obtained by these systems are presented in Section 7. We present the conclusions and lessons learned in Section 8.", |
|
"cite_spans": [ |
|
{ |
|
"start": 790, |
|
"end": 791, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The work described here builds on the 1 st and 2 nd Shared Task on Multilingual Named Entity Recognition, Normalization and cross-lingual Match-ing for Slavic Languages, (Piskorski et al., 2017 (Piskorski et al., , 2019 , which, to the best of our knowledge, are the first attempts at such shared tasks covering multiple Slavic languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 193, |
|
"text": "(Piskorski et al., 2017", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 219, |
|
"text": "(Piskorski et al., , 2019", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prior Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "High-quality recognition and analysis of NEs is an essential step not only for information access, such as document retrieval and clustering, but it also constitutes a fundamental processing step in a wide range of NLP pipelines built for higher-level analysis of text, such as Information Extraction, see, e.g. (Huttunen et al., 2002) . Other NER-related shared tasks have been organized previously. The first non-English monolingual NER evaluations-covering Chinese, Japanese, Spanish, and Arabic-were held in the context of the Message Understanding Conferences (MUCs) (Chinchor, 1998) and the ACE Programme (Doddington et al., 2004) . The first multilingual NER shared task, which covered several European languages, including Spanish, German, and Dutch, was organized in the context of the CoNLL conferences (Tjong Kim Sang, 2002; Tjong Kim Sang and De Meulder, 2003) . The NE types covered in these campaigns were similar to the NE types covered in our Challenge. Worth mentioning in this context is Entity Discovery and Linking (EDL) (Ji et al., 2014 (Ji et al., , 2015 , a track of the NIST Text Analysis Conferences (TAC). EDL aimed to extract entity mentions from a collection of documents in multiple languages (English, Chinese, and Spanish), and to partition the entities into cross-document equivalence classes, by either linking mentions to a knowledge base or directly clustering them. An important difference between EDL and our task is that EDL required linking entities to a pre-existing knowledge base.", |
|
"cite_spans": [ |
|
{ |
|
"start": 312, |
|
"end": 335, |
|
"text": "(Huttunen et al., 2002)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 588, |
|
"text": "(Chinchor, 1998)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 611, |
|
"end": 636, |
|
"text": "(Doddington et al., 2004)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 820, |
|
"end": 835, |
|
"text": "Kim Sang, 2002;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 836, |
|
"end": 836, |
|
"text": "", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 843, |
|
"end": 873, |
|
"text": "Kim Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1042, |
|
"end": 1058, |
|
"text": "(Ji et al., 2014", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1059, |
|
"end": 1077, |
|
"text": "(Ji et al., , 2015", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prior Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Related to cross-lingual NE recognition is NE transliteration, i.e., linking NEs across languages that use different scripts. A series of NE Transliteration Shared Tasks were organized as a part of NEWS-Named Entity Workshops- (Duan et al., 2016) , focusing mostly on Indian and Asian languages. In 2010, the NEWS Workshop included a shared task on Transliteration Mining (Kumaran et al., 2010) , i.e., mining of names from parallel corpora. This task included corpora in English, Chinese, Tamil, Russian, and Arabic.", |
|
"cite_spans": [ |
|
{ |
|
"start": 227, |
|
"end": 246, |
|
"text": "(Duan et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 394, |
|
"text": "(Kumaran et al., 2010)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prior Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Research on NE focusing on Slavic languages includes tools for NE recognition for Croatian (Karan et al., 2013; Ljube\u0161i\u0107 et al., 2013) , NE recognition in Croatian tweets (Baksa et al., 2017) , a manually annotated NE corpus for Croatian (Agi\u0107 and Ljube\u0161i\u0107, 2014) , tools for NE recognition in Slovene (\u0160tajner et al., 2013; Ljube\u0161i\u0107 et al., 2013) , a Czech corpus of 11K annotated NEs (\u0160ev\u010d\u00edkov\u00e1 et al., 2007) , NER tools for Czech (Konkol and Konop\u00edk, 2013) , tools and resources for fine-grained annotation of NEs in the National Corpus of Polish (Waszczuk et al., 2010; Savary and Piskorski, 2011) , NER shared tasks for Polish organized under the umbrella of POLEVAL 2 evaluation campaigns (Ogrodniczuk and \u0141ukasz Kobyli\u0144ski, 2018, 2020) . and a recent shared task on NE Recognition in Russian (Starostin et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 111, |
|
"text": "(Karan et al., 2013;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 112, |
|
"end": 134, |
|
"text": "Ljube\u0161i\u0107 et al., 2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 191, |
|
"text": "(Baksa et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 238, |
|
"end": 263, |
|
"text": "(Agi\u0107 and Ljube\u0161i\u0107, 2014)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 324, |
|
"text": "(\u0160tajner et al., 2013;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 347, |
|
"text": "Ljube\u0161i\u0107 et al., 2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 410, |
|
"text": "(\u0160ev\u010d\u00edkov\u00e1 et al., 2007)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 459, |
|
"text": "(Konkol and Konop\u00edk, 2013)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 550, |
|
"end": 573, |
|
"text": "(Waszczuk et al., 2010;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 574, |
|
"end": 601, |
|
"text": "Savary and Piskorski, 2011)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 695, |
|
"end": 711, |
|
"text": "(Ogrodniczuk and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 742, |
|
"text": "\u0141ukasz Kobyli\u0144ski, 2018, 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 799, |
|
"end": 823, |
|
"text": "(Starostin et al., 2016)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prior Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The data for this edition of the shared task consists of sets of documents in six Slavic languages: Bulgarian, Czech, Polish, Russian, Slovene and Ukrainian. To accommodate entity linking, each set of documents is chosen to revolve around one certain entity-e.g., a person, an organization or an event. The documents were obtained from the Web, by posing a keyword query to a search engine or publicly available crawled data repositories, and extracting the textual content from the respective sources.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The task is to recognize, classify, and \"normalize\" all named-entity mentions in each of the documents, and to link across languages all named mentions referring to the same real-world entity. Formally, the Multilingual Named Entity Recognition task is subdivided into three sub-tasks:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Named Entity Mention Detection and Classification: Recognizing all named mentions of entities of five types: persons (PER), organizations (ORG), locations (LOC), products (PRO), and events (EVT).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Name Normalization: Mapping each named mention of an entity to its corresponding base form. By \"base form\" we generally mean the lemma (\"dictionary form\") of the inflected word-form. In some cases normalization should go beyond inflection and transform a derived word into a base word's lemma, e.g., in case of personal possessives (see below). Multi-word names should be normalized to the canonical multi-word expression-rather than a sequence of lemmas of the words making up the multiword expression.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Entity Linking. Assigning a unique identifier (ID) to each detected named mention of an entity, in such a way that mentions referring to the same real-world entity should be assigned the same ID-referred to as the cross-lingual ID.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The task does not require positional information of the name entity mentions. Thus, for all occurrences of the same form of a NE mention (e.g., an inflected variant, an acronym or abbreviation) within a given document, no more than one annotation should be produced. 3 Furthermore, distinguishing typographical case is not necessary since the evaluation is case-insensitive. If the text includes lowercase, uppercase or mixed-case variants of the same entity, the system should produce only one annotation for all of these mentions. For instance, for \"ISIS\" and \"isis\" (provided that they refer to the same NE type), only one annotation should be produced. The recognition of commonnoun or pronominal references to named entities does not constitute part of the task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The task defines the following five NE classes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Person names (PER): Names of real (or fictional) persons). Person names should not include titles, honorifics, and functions/positions. For example, in the text fragment \". . . President Vladimir Putin. . . \", only \"Vladimir Putin\" is recognized as a person name. Both initials and pseudonyms are also considered named mentions of persons. Similarly, toponym-based named references to groups of people (that do not have a formal organization unifying them) should also be recognized, e.g., \"Germans.\" In this context, mentions of a single member belonging to such groups, e.g., \"German,\" should be assigned the same cross-lingual ID as plural mentions, i.e., \"Germans\" and \"German\" when referring to the nation receive the same cross-lingual ID.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Named mentions of other groups of people that do have a formal organization unifying them should be tagged as PER, e.g., in the phrase \"Spart'an\u00e9 vyhr\u00e1li\" (Spartans won), \"Spart'an\u00e9 are to be tagged as PER.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Personal possessives derived from a person's name should be classified as a Person, and the base form of the corresponding name should be extracted. For instance, in \"Trumpov tweet\" (Croatian) one is expected to classify \"Trumpov\" as PER, with the base form \"Trump.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Locations (LOC): All toponyms and geopolitical entities-cities, counties, provinces, countries, regions, bodies of water, land formations, etc.including named mentions of facilities-e.g., stadiums, parks, museums, theaters, hotels, hospitals, transportation hubs, churches, streets, railroads, bridges, and similar facilities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In case named mentions of facilities also refer to an organization, the LOC tag should be used. For example, from the text \"San Rafaelle Hospital hired new staff due to Covid-19 pandemic\" the mention \"San Rafaelle Hospital\" should be classified as LOC.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Organizations (ORG): All organizations, including companies, public institutions, political parties, international organizations, religious organizations, sport organizations, educational and research institutions, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Organization designators and potential mentions of the seat of the organization are considered to be part of the organization name. For instance, from the text \"...Zak\u0142ad Ubezpiecze\u0144 Spo\u0142ecznych w Bydgoszczy...\" (The Social Insurance Institution in Bydgoszcz), the full phrase \"Zak\u0142ad Ubezpiecze\u0144 Spo\u0142ecznych w Bydgoszczy\" should be extracted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Products (PRO): All names of products and services, such as electronics (\"Samsung Galaxy A41\"), cars (\"Honda Pilot\"), newspapers (\"Der Spiegel\"), web-services (\"Pintertest\"), medicines (\"Oxycodone\"), awards (\"Pulitzer Prize\"), books (\"Animal Farm\"), TV programmes (\"Wiadomo\u015bci TVP\"), etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "When a company name is used to refer to a service, e.g., \"na Instagramie\" (Polish for \"on Instagram\"), the mention of \"Instagramie\" is considered to refer to a service/product and should be tagged as PRO. However, when a company name refers to a service, expressing an opinion of the company, it should be tagged as ORG.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "This category also includes legal documents and treaties, e.g., \"Uk\u0142ad z Schengen\" (Pol- ish: \"Schengen Agreement\") and initiatives, e.g., \"Horizon 2020\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Classes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "This category covers named mentions of events, including conferences, e.g. \"24. Konference \u017d\u00e1rovn\u00e9ho Zinkov\u00e1n\u00ed\" (Czech: \"Hot Galvanizing Conference\"), concerts, festivals, holidays, e.g., \"\u015awi\u0119ta Bo\u017cego Narodzenia\" (Polish: \"Christmas\"), wars, battles, disasters, e.g., \"Katastrofa Smole\u0144ska\" (Polish: \"the Smole\u0144sk air disaster\"), outbreaks of infectious diseases (\"Spanish Flu\"). Future, speculative, and fictive events-e.g., \"'Polexit\"-are considered event mentions too.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Events (EVT):", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In case of complex named entities, consisting of nested named entities, only the top-most entity should be recognized. For example, from the text \"Universit\u00e0 Commerciale Luigi Bocconi\" one should not extract \"Luigi Bocconi\", but only the top-level entity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Complex and Ambiguous Entities", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In case one word-form (e.g., \"Georgia\") is used to refer to more than one different real-world entities in different contexts in the same document (e.g., a person and a location), two annotations should be returned, associated with different cross-lingual IDs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Complex and Ambiguous Entities", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In case of coordinated phrases, like \"European and German Parliament,\" two names should be extracted (as ORG). The lemmas would be \"European\" and \"German Parliament\", and the IDs should refer to \"European Parliament\" and \"German Parliament\" respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Complex and Ambiguous Entities", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In rare cases, plural forms might have two annotations-e.g., in the phrase \"a border between Irelands\"-\"Irelands\" should be extracted twice with identical lemmas but different IDs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Complex and Ambiguous Entities", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Input Document Format: Documents in the collection are represented in the following format. The first five lines contain the following metadata (in the respective order): <DOCUMENT-ID>, <LANGUAGE>, <CREATION-DATE>, <URL>, <TITLE>, <TEXT>. The text to be processed begins from the sixth line and runs till the end of file. The <URL> field stores the origin from which the text document was retrieved. The values of <CREATION-DATE> and <TITLE> were not provided for all documents, due to unavailability of such data or due to errors in parsing during data collection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Input and Response", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "System Response. For each input file, the system should return one output file as follows. The first line should contain only the <DOCUMENT-ID>, which corresponds to the input. Each subsequent line contains one annotation, as tab-separated fields:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Input and Response", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "<MENTION> TAB <BASE> TAB <CAT> TAB <ID>", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Input and Response", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The <MENTION> field should be the NE as it appears in text. The <BASE> field should be the base form of the entity. The <CAT> field stores the category of the entity (ORG, PER, LOC, PRO, or EVT) and <ID> is the cross-lingual identifier. The cross-lingual identifiers may consist of an arbitrary sequence of alphanumeric characters. An example document in Czech and the corresponding response is shown in Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 404, |
|
"end": 412, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System Input and Response", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The detailed descriptions of the tasks are available on the web page of the Shared Task. 4", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Input and Response", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "For Russian, Polish, Czech and Bulgarian, the training and test data sets from the 2019 Shared Task were used as training data for 2021. For the new languages-Ukrainian and Slovene-new training sets were annotated. The test data in all six languages covered two major current topics: the COVID-19 pandemic and the 2020 USA Presidential elections (USA 2020 ELECTIONS).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The 2019 training data consist of four sets of documents extracted from the Web, each related to a given focus entity. We tried to choose entities related to events in 2018 and 2019 covered in mainstream news in many languages. ASIA BIBI, which relates to a Pakistani woman involved in a blasphemy case, BREXIT, RYANAIR, which faced a massive strike, and NORD STREAM, a controversial Russian-European project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Each dataset was created as follows. For the focus entity, we posed a search query to Google and/or publicly available crawled data repositories, in each of the target languages. The query returned documents in the target language. We removed duplicates, downloaded the HTMLmainly news articles-and converted them into plain text. Since the result of HTML parsing may include not only the main text of a Web page, but also spurious text, some additional manual cleaning was applied whenever necessary. The resulting set of \"cleaned\" documents were used to manually select documents for each language and topic, for the final datasets. Documents were annotated using the Inforex 5 web-based system for annotation of text corpora (Marci\u0144czuk et al., 2017) . Inforex allows parallel access and resource sharing by multiple annotators. It let us share a common list of entities, and perform entity-linking semi-automatically: for a given entity, an annotator sees a list of entities of the same type inserted by all annotators and can select an entity ID from the list. A snapshot of the Inforex interface is in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 728, |
|
"end": 753, |
|
"text": "(Marci\u0144czuk et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1108, |
|
"end": 1116, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In addition, Inforex keeps track of all lemmas and IDs inserted for each surface form, and inserts them automatically, so in many cases the annotator only confirms the proposed values, which speeds up the annotation process a great deal. All annotations were made by native speakers. After annotation, we performed automatic and manual consistency checks, to reduce annotation errors, especially in entity linking.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Training and test data statistics are presented in Table 1 and 2 respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 58, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The testing datasets-COVID-19 and USA 2020 ELECTIONS-were released to the participants who were given circa 2 days to return up to 5 system responses. The participants did not know the topics in advance, and did not receive the annotations. The main drive behind this decision was to push participants to build a general solution for Slavic NER, rather than to optimize their models toward a particular set of names.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The NER task (exact case-insensitive matching) and Name Normalization (or \"lemmatization\") were evaluated in terms of precision, recall, and F1-measure. For NER, two types of evaluations were carried out: Documents 500 284 153 600 52 50 88 89 118 101 4 6 151 161 150 130 74 40 146 163 150 87 52 63 PER 2 650 1 108 1 308 2 515 532 242 683 570 643 583 36 39 538 570 392 335 548 78 136 161 72 147 107 33 LOC 3 524 1 279 666 2 407 403 336 403 366 567 388 24 57 1 430 1 689 1 320 910 1 362 339 821 871 902 344 384 455 ORG 3 080 1 039 828 2 455 301 166 286 214 419 245 10 30 837 477 792 540 460 449 529 707 500 238 408 193 EVT 1 072 471 261 776 165 62 14 3 1 8 0 0 15 9 5 6 50 14 7 12 0 4 8 0 PRO 668 232 137 490 31 17 55 42 49 63 2 1 405 364 510 331 243 8 114 66 82 79 101 20 Total 10 994 4 129 3 200 8 643 1 445 823 1 441 1 195 1 679 1 287 72 127 3 225 3 116 3 020 2 122 2 664 948 1 607 1 817 1 556 812 1008 701 Distinct Surface forms 2 820 1 111 783 1 200 596 234 508 303 406 412 51 87 845 770 892 504 902 336 514 475 400 323 673 \u2022 Relaxed: An entity mentioned in a given document is considered to be extracted correctly if the system response includes at least one annotation of a named mention of this entity (regardless of whether the extracted mention is in base form);", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 1174, |
|
"text": "Documents 500 284 153 600 52 50 88 89 118 101 4 6 151 161 150 130 74 40 146 163 150 87 52 63 PER 2 650 1 108 1 308 2 515 532 242 683 570 643 583 36 39 538 570 392 335 548 78 136 161 72 147 107 33 LOC 3 524 1 279 666 2 407 403 336 403 366 567 388 24 57 1 430 1 689 1 320 910 1 362 339 821 871 902 344 384 455 ORG 3 080 1 039 828 2 455 301 166 286 214 419 245 10 30 837 477 792 540 460 449 529 707 500 238 408 193 EVT 1 072 471 261 776 165 62 14 3 1 8 0 0 15 9 5 6 50 14 7 12 0 4 8 0 PRO 668 232 137 490 31 17 55 42 49 63 2 1 405 364 510 331 243 8 114 66 82 79 101 20 Total 10 994 4 129 3 200 8 643 1 445 823 1 441 1 195 1 679 1 287 72 127 3 225 3 116 3 020 2 122 2 664 948 1 607 1 817 1 556 812 1008 701 Distinct Surface forms 2 820 1 111 783 1 200 596 234 508 303 406 412 51 87 845 770 892 504 902 336 514 475 400 323 673", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Strict: The system response should include exactly one annotation for each unique form of a named mention of an entity in a given document, i.e., identifying all variants of an entity is required.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In relaxed evaluation we additionally distinguish between exact and partial matching: in the latter case, an entity mentioned in a given document is considered to be extracted correctly if the system response includes at least one partial match of a named mention of this entity. We evaluate systems at several levels of granularity: we measure performance for (a) all NE types and all languages, (b) each given NE type and all languages, (c) all NE types for each language, and (d) each given NE type per language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the name normalization task, we take into account only correctly recognized entity mentions and only those that were normalized (on both the annotation and system's sides). Formally, let N correct denote the number of all correctly recognized entity mentions for which the system returned a correct base form. Let N key denote the number of all normalized entity mentions in the gold-standard answer key and N response denote the number of all normalized entity mentions in the system's response. We define precision and recall for the name normalization task as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Recall = N corrrect N key Precision = N corrrect N response", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In evaluating document-level, single-language and cross-lingual entity linking we adopted the Link-Based Entity-Aware metric (LEA) (Moosavi and Strube, 2016), which considers how important the entity is and how well it is resolved. LEA is defined as follows. Let K = {k 1 , k 2 , . . . , k |K| } denote the set of key entities and R = {r 1 , r 2 , . . . , r |R| } the set of response entities, i.e., k i \u2208 K (r i \u2208 R) stand for set of mentions of the same entity in the key entity set (response entity set). LEA recall and precision are then defined as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Recall LEA = k i \u2208K imp(k i ) \u2022 res(k i ) kz\u2208K imp(k z ) Precision LEA = r i \u2208R imp(r i ) \u2022 res(r i ) rz\u2208R imp(r z )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "where imp and res denote the measure of importance and the resolution score for an entity, respectively. In our setting, we define imp(e) = log 2 |e| for an entity e (in K or R), |e| is the number of mentions of e-i.e., the more mentions an entity has the more important it is. To avoid biasing the importance of the more frequent entities log is used. The resolution score of key entity k i is computed as the fraction of correctly resolved coreference links of k i :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "res(k i ) = r j \u2208R link (k i \u2229 r j ) link (k i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "where link (e) = (|e| \u00d7 (|e| \u2212 1))/2 is the number of unique co-reference links in e. For each k i , LEA checks all response entities to check whether they are partial matches for k i . Analogously, the resolution score of response entity r i is computed as the fraction of co-reference links in r i that are extracted correctly:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "res(r i ) = k j \u2208K link (r i \u2229 k j ) link (r i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "LEA brings several benefits. For example, LEA considers resolved co-reference relations instead of resolved mentions and has more discriminative power than other metrics for co-reference resolution (Moosavi and Strube, 2016).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The evaluation was carried out in \"caseinsensitive\" mode: all named mentions in system response and test corpora were lower-cased.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Methodology", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Six teams submitted descriptions of their systems as BSNLP Workshop papers. We briefly review these systems here; for complete descriptions, please see the corresponding papers. Two additional teams submitted their results with short descriptions of their systems, which appear in this section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The UL FRI system, (Prelevikj and Zitnik, 2021) , generated results for several settings, models and languages, although the team's main motivation is to develop effective NER tools for Slovenian. The system uses contemporary BERT and RoBERTa multilingual pre-trained models, which include Slovene among other languages. The system was further trained on the SlavNER dataset for the NER task and used the Dedupe method for the Entity Matching task. The best performing models were pre-trained on Slovene. The results also indicate that two-step prediction of NE could be beneficial. The team made their code publicly available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 47, |
|
"text": "(Prelevikj and Zitnik, 2021)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The Priberam Labs system, (Ferreira et al., 2021) , focuses on the NER task. It uses three components: a multilingual contextual embedding model, a character-level embedding model, and a bi-affine classifier model. The paper reports results for different multilingual contextual embedding models, which included Multilingual BERT, XLM-RoBERTa, or the Slavic BERT. For different languages the best-performing models where different, but having the same language within the large pre-trained model usually improved the results-e.g., Slavic BERT, which used additional resources for Bulgarian, Russian and Polish, also performed best for these languages. The system uses heuristics to predict and resolve spans of NEs, and in this way it is able to tag overlapping entities. The code for the system is made available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 49, |
|
"text": "(Ferreira et al., 2021)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The TLD system, (V\u012bksna and Skadina, 2021) , uses a staged approach. The first stage is identification of NEs in context, which is treated as a sequence labeling problem and is performed by a multilingual BERT model from Google, modified by the team. Entity linking is the second stage, which uses a list of LaBSE embeddings; matched entries need to pass a pre-defined threshold of cosine similarity with existing entries; otherwise they are added as new values to the list. The third stage is normalisation of identified entities, which is performed using models provided with Stanza.", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 42, |
|
"text": "(V\u012bksna and Skadina, 2021)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The L3i system, (Cabrera-Diego et al., 2021), combines BERT models with the \"Frustratingly Easy\" domain adaptation algorithm. It also uses other techniques to improve system's NER performance, such as marking and enrichment of uppercase tokens, prediction of NE boundaries with a multitask approach, prediction of masked tokens, fine-tuning the language model to the domain of the document.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The TraSpaS system, (Suppa and Jariabka, 2021) , tests the assumption that the universal open-source NLP toolkits (such as SpaCy, Stanza or Trankit) could achieve competitive performance on the Multilingual NER task, using large pretrained Transformer-based language models available from HuggingfaceTransformers, which have not been available in previous editions of the Shared Task. The team tests the generalizability of the models to new low-resourced domains, and to languages such as Slovene and Ukrainian.", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 46, |
|
"text": "(Suppa and Jariabka, 2021)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The UWr-VL system, (Rychlikowski et al., 2021) , utilizes large collections of unstructured and structured documents for unsupervised training of embedding of lexical units and for recog-nizing and linking multiple real-world NEs. In particular, the team makes use of CommonCrawl news articles, Wikipedia, and its structured counterpart Wikidata as knowledge sources, to address the problem of data scarcity, building neural gazetteer via collecting different embeddings from these knowledge sources. The system further uses standard neural approaches to the NER task, with a RNN classifier, in order to determine for every input word the probability of labelling it with various beginning and end NE tags.", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 46, |
|
"text": "(Rychlikowski et al., 2021)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Two more systems generated the results for the shared task-CTC-NER from the Cognitive Technologies Center team, and PAISC_wxd:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "CTC-NER is a baseline prototype of a NER component of an entity recognition system currently under development at the Cognitive Technologies Center. The system has a hybrid architecture combining rule-based and ML techniques; the ML-component is loosely related to (Antonova and Soloviev, 2013) . The languages currently processed include Russian, English and Ukrainian.", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 294, |
|
"text": "(Antonova and Soloviev, 2013)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "PAISC_wxd uses the XLM-Roberta model, followed by BiLSTM-CRF on top. In addition, the system uses data enhancement based on machine translation. Figure 3 shows the performance of the systems averaged across all languages and both test corpora. For each team that provided a solution for all six languages (7 teams except CTC-NER), we present the best scores (F 1, P recision, and Recall) obtained by the team in three evaluation modes. 6 As the plots show, the best performing model, Priberam, yields F-measure 85.7% according to the relaxed partial evaluation, and 79.3% according to the strict evaluation. The Priberam submission scores highest in precision -89,4% relaxed partial, and 85.1% strict -but much lower in recall -82.2% relaxed partial, and 74.3% strict.", |
|
"cite_spans": [ |
|
{ |
|
"start": 436, |
|
"end": 437, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 153, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Participant Systems", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Among the teams that submitted results for cross-lingual entity linking, only two achieved results comparable with the benchmarks achieved on the Second Challenge, and this year's results surpass those benchmarks by a substantial margin. The best results for each team, averaged across two corpora, are shown in Table 3 . These results 6 Complete results available on the Workshop's Web page: bsnlp.cs.helsinki.fi/final-rank-2021.pdf show that this task is much more difficult than entity extraction. The best performing model, TLD, achieves F-measure 50.4%.", |
|
"cite_spans": [ |
|
{ |
|
"start": 336, |
|
"end": 337, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 312, |
|
"end": 319, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Note that in our setting the performance on entity linking depends on the performance on name recognition and normalization: each system had to link entities that it had extracted from documents upstream, rather than link a set of correct entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Tables 4 and 5 present the F1-measures separated by language, for all tasks for the COVID-19 and USA 2020 ELECTIONS data sets These tables show only the top-performing model for each team. For recognition, we show only the relaxed evaluation, since the results obtained on the three evaluation schemes are correlated, as can be seen from Figure 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 346, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The tables indicate some variation in scores obtained on the test corpora This variation could be due to a number of factors, including actual differences in the test data, as well as differences in annotation across languages. This variation should and will be investigated in greater depth.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In Table 6 we present the results of the evaluation by entity type. As seen in the table, performance was higher overall for LOC and PER, and substantially lower for ORG and PRO, which corresponds with our findings from the previous editions of the shared task, where ORG and MISC were the most problematic categories (Piskorski et al., 2017) . The PRO category also exhibits higher variance across languages and corpora than other categories, which might point to possible annotation artefacts. The results for the EVT category are less informative, since the task heavily depends on detecting the repeated central events of the corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 318, |
|
"end": 342, |
|
"text": "(Piskorski et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "This paper reports on the 3 rd Multilingual Named Entity Challenge focusing on recognizing mentions of NEs in Web documents in six Slavic languages, normalization of the NEs, and crosslingual entity linking. The Challenge has attracted substantial interest, following the prior Challenges in 2017 and 2019, with 10 teams registering for the competition and eight teams submitting results from working systems, with multiple variants. Most systems use state-of-the-art neural network models. Overall, the results of the bestperforming systems are quite strong for extraction and normalization, while cross-lingual linking is the most challenging of the tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We show summary results for the main aspects of the challenge and the best-performing model for each team. For detailed, in-depth evaluations of all participating systems and their performance please consult the Shared Task's Web page and the papers by the respective teams.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "To stimulate further research into NLP for Slavic languages, including cross-lingual entity linking, our training and test datasets, the detailed annotations, and scripts used for evaluations are made available to the public on the Shared Task's Web page. 7 The annotation interface is released by the Inforex team, to support further annotation of additional data for future tests.", |
|
"cite_spans": [ |
|
{ |
|
"start": 256, |
|
"end": 257, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "This challenge covered six Slavic languages. For future editions of the Challenge, we plan to expand the data sets, covering a wider range of entity types, and supporting cross-lingual entity linking. We plan to expand the training and test data to include non-Slavic languages. We will also undertake further refinement of the underlying annotation guidelines-a highly complex task in a real-world setting. More complex phenomena also need to be addressed, e.g., coordinated NEs, contracted versions of multiple NEs, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We believe that the reported results and the annotated datasets will help stimulate further research on robust, end-to-end analysis of real-world texts in Slavic languages. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "bsnlp.cs.helsinki.fi/shared_task.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http:\\\\poleval.pl", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Unless the different occurrences have different entity types (different readings) assigned to them, which is rare.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://bsnlp.cs.helsinki.fi/System_ response_guidelines-1.2.pdf", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "github.com/CLARIN-PL/Inforex", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "bsnlp.cs.helsinki.fi/shared_task.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Work on Bulgarian was in part supported by the Bulgarian National Interdisciplinary Research e-Infrastructure for Resources and Technologies for the Bulgarian Language and Cultural Heritage, part of the EU infrastructures CLARIN and DARIAH -CLaDA-BG, Grant number DO1-377/18.12.2020.Work on Czech was in part supported by ERDF \"Research and Development of Intelligent Components of Advanced Technologies for the Pilsen Metropolitan Area (InteCom)\" (no. CZ.02.1.01/0.0/0.0/17 048/0007267), and by Grant No. SGS-2019-018 \"Processing of heterogeneous data and its specialized applications.\"Work on Inforex and on Polish was supported in part by investment in the CLARIN-PL research infrastructure funded by the Polish Ministry of Science and Higher Education.We thank the students of Pushkin State Russian Language Institute for their assistance with annotation of Russian data. This work has been partially supported by the European Union Horizon 2020 research and in-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The SE-Times.HR linguistically annotated corpus of Croatian", |
|
"authors": [ |
|
{ |
|
"first": "\u017deljko", |
|
"middle": [], |
|
"last": "Agi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Ljube\u0161i\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Ninth International Conference on Language Resources and Evaluation (LREC 2014)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1724--1727", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "\u017deljko Agi\u0107 and Nikola Ljube\u0161i\u0107. 2014. The SE- Times.HR linguistically annotated corpus of Croa- tian. In Ninth International Conference on Lan- guage Resources and Evaluation (LREC 2014), pages 1724-1727, Reykjav\u00edk, Iceland.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Conditional random field models for the processing of Russian", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Antonova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Soloviev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Linguistics and Intellectual Technologies: Papers From the Annual Conference \"Dialogue\"(Bekasovo", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "27--44", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "AY Antonova and AN Soloviev. 2013. Conditional random field models for the processing of Russian. In Computational Linguistics and Intellectual Tech- nologies: Papers From the Annual Conference \"Di- alogue\"(Bekasovo, 29 May-2 June 2013), volume 1, pages 27-44.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Tagging named entities in Croatian tweets. Sloven\u0161\u010dina 2.0: empirical, applied and interdisciplinary research", |
|
"authors": [ |
|
{ |
|
"first": "Kre\u0161imir", |
|
"middle": [], |
|
"last": "Baksa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dino", |
|
"middle": [], |
|
"last": "Golovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
},

{

"first": "Jan",

"middle": [],

"last": "\u0160najder",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "20--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kre\u0161imir Baksa, Dino Golovi\u0107, Goran Glava\u0161, and Jan \u0160najder. 2017. Tagging named entities in Croatian tweets. Sloven\u0161\u010dina 2.0: empirical, applied and in- terdisciplinary research, 4(1):20-41.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Using a frustratingly easy domain and tagset adaptation for creating slavic named entity recognition systems", |
|
"authors": [ |
|
{

"first": "Luis",

"middle": [

"Adri\u00e1n"

],

"last": "Cabrera-Diego",

"suffix": ""

},

{

"first": "Jose",

"middle": [

"G"

],

"last": "Moreno",

"suffix": ""

},

{

"first": "Antoine",

"middle": [],

"last": "Doucet",

"suffix": ""

}
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing. European Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luis Adri\u00e1n Cabrera-Diego, Jose G. Moreno, and An- toine Doucet. 2021. Using a frustratingly easy domain and tagset adaptation for creating slavic named entity recognition systems. In Proceedings of the 8th Workshop on Balto-Slavic Natural Lan- guage Processing. European Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Overview of MUC-7/MET-2", |
|
"authors": [ |
|
{ |
|
"first": "Nancy", |
|
"middle": [], |
|
"last": "Chinchor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of Seventh Message Understanding Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nancy Chinchor. 1998. Overview of MUC-7/MET-2. In Proceedings of Seventh Message Understanding Conference (MUC-7).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The Automatic Content Extraction (ACE) program-tasks, data, and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Doddington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Przybocki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lance", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Strassel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Fourth International Conference on Language Resources and Evaluation (LREC 2004)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "837--840", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George R. Doddington, Alexis Mitchell, Mark A. Przy- bocki, Lance A. Ramshaw, Stephanie Strassel, and Ralph M. Weischedel. 2004. The Automatic Con- tent Extraction (ACE) program-tasks, data, and evaluation. In Fourth International Conference on Language Resources and Evaluation (LREC 2004), pages 837-840, Lisbon, Portugal.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Report of NEWS 2016 machine transliteration shared task", |
|
"authors": [ |
|
{ |
|
"first": "Xiangyu", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rafael", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Banchs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haizhou", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kumaran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The Sixth Named Entities Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "58--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiangyu Duan, Rafael E. Banchs, Min Zhang, Haizhou Li, and A. Kumaran. 2016. Report of NEWS 2016 machine transliteration shared task. In Proceedings of The Sixth Named Entities Workshop, pages 58-72, Berlin, Germany.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Priberam labs at the 3rd shared task on", |
|
"authors": [ |
|
{ |
|
"first": "Pedro", |
|
"middle": [], |
|
"last": "Ferreira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruben", |
|
"middle": [], |
|
"last": "Cardoso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Afonso", |
|
"middle": [], |
|
"last": "Mendes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pedro Ferreira, Ruben Cardoso, and Afonso Mendes. 2021. Priberam labs at the 3rd shared task on", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Recognition F1-measure (relaxed partial) by entity type-best-performing systems for each language. slavner", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Table 6: Recognition F1-measure (relaxed partial) by entity type-best-performing systems for each language. slavner. In Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing. Euro- pean Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Diversity of scenarios in information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Silja", |
|
"middle": [], |
|
"last": "Huttunen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Yangarber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the Third International Conference on Language Resources and Evaluation (LREC 2002)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Silja Huttunen, Roman Yangarber, and Ralph Grish- man. 2002. Diversity of scenarios in information extraction. In Proceedings of the Third International Conference on Language Resources and Evaluation (LREC 2002), Las Palmas, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Overview of TAC-KBP2014 entity discovery and linking tasks", |
|
"authors": [ |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Nothman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hachey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of Text Analysis Conference (TAC2014)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1333--1339", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heng Ji, Joel Nothman, and Ben Hachey. 2014. Overview of TAC-KBP2014 entity discovery and linking tasks. In Proceedings of Text Analysis Con- ference (TAC2014), pages 1333-1339.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Overview of TAC-KBP2015 tri-lingual entity discovery and linking", |
|
"authors": [ |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Nothman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hachey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of Text Analysis Conference (TAC2015)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heng Ji, Joel Nothman, and Ben Hachey. 2015. Overview of TAC-KBP2015 tri-lingual entity dis- covery and linking. In Proceedings of Text Analysis Conference (TAC2015).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Jure Miji\u0107, Artur \u0160ili\u0107, and Bojana Dalbelo Ba\u0161i\u0107. 2013. CroNER: Recognizing named entities in Croatian using conditional random fields. Informatica", |
|
"authors": [ |
|
{ |
|
"first": "Mladen", |
|
"middle": [], |
|
"last": "Karan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frane", |
|
"middle": [], |
|
"last": "\u0160ari\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mladen Karan, Goran Glava\u0161, Frane \u0160ari\u0107, Jan \u0160na- jder, Jure Miji\u0107, Artur \u0160ili\u0107, and Bojana Dalbelo Ba\u0161i\u0107. 2013. CroNER: Recognizing named entities in Croatian using conditional random fields. Infor- matica, 37(2):165.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "CRFbased Czech named entity recognizer and consolidation of Czech NER research", |
|
"authors": [ |
|
{ |
|
"first": "Michal", |
|
"middle": [], |
|
"last": "Konkol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miloslav", |
|
"middle": [], |
|
"last": "Konop\u00edk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Text, Speech and Dialogue", |
|
"volume": "8082", |
|
"issue": "", |
|
"pages": "153--160", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michal Konkol and Miloslav Konop\u00edk. 2013. CRF- based Czech named entity recognizer and consoli- dation of Czech NER research. In Text, Speech and Dialogue, volume 8082 of Lecture Notes in Com- puter Science, pages 153-160. Springer Berlin Hei- delberg.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Report of NEWS 2010 transliteration mining shared task", |
|
"authors": [ |
|
{

"first": "A",

"middle": [],

"last": "Kumaran",

"suffix": ""

},

{

"first": "Mitesh",

"middle": [

"M"

],

"last": "Khapra",

"suffix": ""

},

{

"first": "Haizhou",

"middle": [],

"last": "Li",

"suffix": ""

}
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 Named Entities Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A Kumaran, Mitesh M. Khapra, and Haizhou Li. 2010. Report of NEWS 2010 transliteration mining shared task. In Proceedings of the 2010 Named Entities Workshop, pages 21-28, Uppsala, Sweden.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Combining available datasets for building named entity recognition models of Croatian and Slovene. Sloven\u0161\u010dina 2.0: empirical, applied and interdisciplinary research", |
|
"authors": [ |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Ljube\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marija", |
|
"middle": [], |
|
"last": "Stupar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tereza", |
|
"middle": [], |
|
"last": "Juri\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u017deljko", |
|
"middle": [], |
|
"last": "Agi\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "35--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikola Ljube\u0161i\u0107, Marija Stupar, Tereza Juri\u0107, and \u017deljko Agi\u0107. 2013. Combining available datasets for building named entity recognition models of Croat- ian and Slovene. Sloven\u0161\u010dina 2.0: empirical, ap- plied and interdisciplinary research, 1(2):35-57.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Inforex -a collaborative system for text corpora annotation and analysis", |
|
"authors": [ |
|
{ |
|
"first": "Micha\u0142", |
|
"middle": [], |
|
"last": "Marci\u0144czuk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Oleksy", |
|
"suffix": "" |
|
},

{

"first": "Jan",

"middle": [],

"last": "Koco\u0144",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "473--482", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Micha\u0142 Marci\u0144czuk, Marcin Oleksy, and Jan Koco\u0144. 2017. Inforex -a collaborative system for text cor- pora annotation and analysis. In Proceedings of the International Conference Recent Advances in Natu- ral Language Processing, RANLP 2017, Varna, Bul- garia, September 2-8, 2017, pages 473-482. IN- COMA Ltd.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Which coreference evaluation metric do you trust? A proposal for a link-based entity aware metric", |
|
"authors": [ |
|
{

"first": "Nafise",

"middle": [

"Sadat"

],

"last": "Moosavi",

"suffix": ""

},

{

"first": "Michael",

"middle": [],

"last": "Strube",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "632--642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nafise Sadat Moosavi and Michael Strube. 2016. Which coreference evaluation metric do you trust? A proposal for a link-based entity aware metric. In Proceedings of the 54th Annual Meeting of the Asso- ciation for Computational Linguistics (ACL 2016), pages 632-642, Berlin, Germany.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Proceedings of the PolEval", |
|
"authors": [], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maciej Ogrodniczuk and \u0141ukasz Kobyli\u0144ski, editors. 2018. Proceedings of the PolEval 2018 Workshop. Institute of Computer Science, Polish Academy of Sciences, Warsaw, Poland.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "2020. Proceedings of the PolEval 2020 Workshop. Institute of Computer Science", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maciej Ogrodniczuk and \u0141ukasz Kobyli\u0144ski, editors. 2020. Proceedings of the PolEval 2020 Workshop. Institute of Computer Science, Polish Academy of Sciences, Warsaw, Poland.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The second crosslingual challenge on recognition, normalization, classification, and linking of named entities across Slavic languages", |
|
"authors": [ |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laska", |
|
"middle": [], |
|
"last": "Laskova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Micha\u0142", |
|
"middle": [], |
|
"last": "Marci\u0144czuk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidia", |
|
"middle": [], |
|
"last": "Pivovarova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "P\u0159ib\u00e1\u0148", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Yangarber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 7th Workshop on Balto-Slavic Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--74", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jakub Piskorski, Laska Laskova, Micha\u0142 Marci\u0144czuk, Lidia Pivovarova, Pavel P\u0159ib\u00e1\u0148, Josef Steinberger, and Roman Yangarber. 2019. The second cross- lingual challenge on recognition, normalization, classification, and linking of named entities across Slavic languages. In Proceedings of the 7th Work- shop on Balto-Slavic Natural Language Processing, pages 63-74, Florence, Italy. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The first cross-lingual challenge on recognition, normalization and matching of named entities in Slavic languages", |
|
"authors": [ |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidia", |
|
"middle": [], |
|
"last": "Pivovarova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "\u0160najder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Steinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Yangarber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 6th Workshop on Balto-Slavic Natural Language Processing. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jakub Piskorski, Lidia Pivovarova, Jan \u0160najder, Josef Steinberger, and Roman Yangarber. 2017. The first cross-lingual challenge on recognition, normaliza- tion and matching of named entities in Slavic lan- guages. In Proceedings of the 6th Workshop on Balto-Slavic Natural Language Processing. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "On knowledge-poor methods for person name matching and lemmatization for highly inflectional languages", |
|
"authors": [ |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karol", |
|
"middle": [], |
|
"last": "Wieloch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Sydow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Information retrieval", |
|
"volume": "12", |
|
"issue": "3", |
|
"pages": "275--299", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jakub Piskorski, Karol Wieloch, and Marcin Sydow. 2009. On knowledge-poor methods for person name matching and lemmatization for highly inflectional languages. Information retrieval, 12(3):275-299.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Bsnlp 2021 shared task: Multilingual named entity recognition and matching using bert and dedupe for slavic languages", |
|
"authors": [ |
|
{ |
|
"first": "Marko", |
|
"middle": [], |
|
"last": "Prelevikj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slavko", |
|
"middle": [], |
|
"last": "Zitnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marko Prelevikj and Slavko Zitnik. 2021. Bsnlp 2021 shared task: Multilingual named entity recognition and matching using bert and dedupe for slavic lan- guages. In Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing. Euro- pean Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Slavonic information extraction and partial parsing", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Przepi\u00f3rkowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the Workshop on Balto-Slavonic Natural Language Processing: Information Extraction and Enabling Technologies, ACL '07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Przepi\u00f3rkowski. 2007. Slavonic information ex- traction and partial parsing. In Proceedings of the Workshop on Balto-Slavonic Natural Language Pro- cessing: Information Extraction and Enabling Tech- nologies, ACL '07, pages 1-10, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Named entity recognition and linking augmented with large-scale structured data", |
|
"authors": [ |
|
{ |
|
"first": "Pawe\u0142", |
|
"middle": [], |
|
"last": "Rychlikowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Lancucki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Kaczmarek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart\u0142omiej", |
|
"middle": [], |
|
"last": "Najdecki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Wawrzy\u0144ski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Janowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pawe\u0142 Rychlikowski, Adrian Lancucki, Adam Kacz- marek, Bart\u0142omiej Najdecki, Adam Wawrzy\u0144ski, and Wojciech Janowski. 2021. Named entity recog- nition and linking augmented with large-scale struc- tured data. In Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing. Eu- ropean Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Language Resources for Named Entity Annotation in the National Corpus of Polish", |
|
"authors": [ |
|
{ |
|
"first": "Agata", |
|
"middle": [], |
|
"last": "Savary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Piskorski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Control and Cybernetics", |
|
"volume": "40", |
|
"issue": "2", |
|
"pages": "361--391", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Agata Savary and Jakub Piskorski. 2011. Language Resources for Named Entity Annotation in the Na- tional Corpus of Polish. Control and Cybernetics, 40(2):361-391.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Named entities in Czech: annotating data and developing NE tagger", |
|
"authors": [ |
|
{ |
|
"first": "Magda", |
|
"middle": [], |
|
"last": "\u0160ev\u010d\u00edkov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zden\u011bk", |
|
"middle": [], |
|
"last": "\u017dabokrtsk\u1ef3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Old\u0159ich", |
|
"middle": [], |
|
"last": "Kruza", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "International Conference on Text, Speech and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "188--195", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Magda \u0160ev\u010d\u00edkov\u00e1, Zden\u011bk \u017dabokrtsk\u1ef3, and Old\u0159ich Kruza. 2007. Named entities in Czech: annotat- ing data and developing NE tagger. In International Conference on Text, Speech and Dialogue, pages 188-195. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Razpoznavanje imenskih entitet v slovenskem besedilu. Sloven\u0161\u010dina 2.0: empirical, applied and interdisciplinary research", |
|
"authors": [ |
|
{ |
|
"first": "Tadej", |
|
"middle": [], |
|
"last": "\u0160tajner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toma\u017e", |
|
"middle": [], |
|
"last": "Erjavec", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Krek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "58--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tadej \u0160tajner, Toma\u017e Erjavec, and Simon Krek. 2013. Razpoznavanje imenskih entitet v slovenskem besedilu. Sloven\u0161\u010dina 2.0: empirical, applied and interdisciplinary research, 1(2):58-81.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "FactRuEval 2016: Evaluation of named entity recognition and fact extraction systems for Russian", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Starostin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Bocharov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Alexeeva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Bodrova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Chuchunkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Dzhumaev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Efimenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Granovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Khoroshevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Krylova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Nikolaeva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Smurov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Toldova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Computational Linguistics and Intellectual Technologies. Proceedings of the Annual International Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "688--705", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. S. Starostin, V. V. Bocharov, S. V. Alexeeva, A. A. Bodrova, A. S. Chuchunkov, S. S. Dzhumaev, I. V. Efimenko, D. V. Granovsky, V. F. Khoroshevsky, I. V. Krylova, M. A. Nikolaeva, I. M. Smurov, and S. Y. Toldova. 2016. FactRuEval 2016: Evalua- tion of named entity recognition and fact extrac- tion systems for Russian. In Computational Lin- guistics and Intellectual Technologies. Proceedings of the Annual International Conference \"Dialogue\", pages 688-705.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Benchmarking pre-trained language models for multilingual ner: Traspas at the bsnlp2021 shared task", |
|
"authors": [ |
|
{ |
|
"first": "Marek", |
|
"middle": [], |
|
"last": "Suppa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ondrej", |
|
"middle": [], |
|
"last": "Jariabka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marek Suppa and Ondrej Jariabka. 2021. Benchmark- ing pre-trained language models for multilingual ner: Traspas at the bsnlp2021 shared task. In Pro- ceedings of the 8th Workshop on Balto-Slavic Natu- ral Language Processing. European Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Introduction to the CoNLL-2002 shared task: Language-independent named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Erik Tjong Kim", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 6th Conference on Natural Language Learning", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "1--4", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1118853.1118877" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik Tjong Kim Sang. 2002. Introduction to the CoNLL-2002 shared task: Language-independent named entity recognition. In Proceedings of the 6th Conference on Natural Language Learning -Volume 20, COLING-02, pages 1-4, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition", |
|
"authors": [ |
|
{

"first": "Erik",

"middle": [],

"last": "Tjong Kim Sang",

"suffix": ""

},

{

"first": "Fien",

"middle": [],

"last": "De Meulder",

"suffix": ""

}
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "142--147", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1119176.1119195" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003 -Volume 4, CONLL '03, pages 142-147, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Multilingual slavic named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Rinalds", |
|
"middle": [], |
|
"last": "V\u012bksna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Inguna", |
|
"middle": [], |
|
"last": "Skadina", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rinalds V\u012bksna and Inguna Skadina. 2021. Multi- lingual slavic named entity recognition. In Pro- ceedings of the 8th Workshop on Balto-Slavic Natu- ral Language Processing. European Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Tools and methodologies for annotating syntax and named entities in the National Corpus of Polish", |
|
"authors": [ |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Waszczuk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katarzyna", |
|
"middle": [], |
|
"last": "G\u0142owi\u0144ska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Agata", |
|
"middle": [], |
|
"last": "Savary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Przepi\u00f3rkowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the International Multiconference on Computer Science and Information Technology (IMC-SIT 2010): Computational Linguistics -Applications (CLA'10)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "531--539", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jakub Waszczuk, Katarzyna G\u0142owi\u0144ska, Agata Savary, and Adam Przepi\u00f3rkowski. 2010. Tools and methodologies for annotating syntax and named en- tities in the National Corpus of Polish. In Proceed- ings of the International Multiconference on Com- puter Science and Information Technology (IMC- SIT 2010): Computational Linguistics -Applica- tions (CLA'10), pages 531-539, Wis\u0142a, Poland. PTI.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"num": null, |
|
"text": "Example input and output formats.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"num": null, |
|
"text": "Best average performance scores obtained by the teams on the two test data", |
|
"type_str": "figure" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "Overview of the training datasets.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td/><td/><td/><td colspan=\"2\">COVID-19</td><td/><td/><td/><td colspan=\"4\">USA 2020 ELECTIONS</td><td/></tr><tr><td/><td>PL</td><td>CS</td><td>RU</td><td>BG</td><td>SL</td><td>UK</td><td>PL</td><td>CS</td><td>RU</td><td>BG</td><td>SL</td><td>UK</td></tr><tr><td>Documents</td><td>103</td><td>155</td><td>83</td><td>151</td><td>178</td><td>85</td><td>66</td><td>85</td><td>163</td><td>151</td><td>143</td><td>83</td></tr><tr><td>PER</td><td>419</td><td>478</td><td>559</td><td>351</td><td>834</td><td>215</td><td colspan=\"2\">566 447</td><td colspan=\"3\">3203 1539 2589</td><td>672</td></tr><tr><td>LOC</td><td>369</td><td>474</td><td>701</td><td colspan=\"2\">759 1228</td><td>364</td><td colspan=\"2\">827 277</td><td colspan=\"3\">3457 1093 1268</td><td>541</td></tr><tr><td>ORG</td><td>402</td><td>318</td><td>628</td><td>589</td><td>965</td><td>455</td><td>243</td><td>99</td><td>2486</td><td>557</td><td>578</td><td>384</td></tr><tr><td>EVT</td><td>240</td><td>393</td><td>435</td><td>465</td><td>612</td><td>269</td><td>86</td><td>63</td><td>396</td><td>170</td><td>118</td><td>257</td></tr><tr><td>PRO</td><td>137</td><td>155</td><td>400</td><td>168</td><td>274</td><td>143</td><td>87</td><td>56</td><td>846</td><td>240</td><td>254</td><td>124</td></tr><tr><td>Total</td><td colspan=\"12\">1567 1818 2723 2332 3913 1446 1810 942 10398 3599 4807 1978</td></tr><tr><td>Distinct</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>Surface forms</td><td>688</td><td colspan=\"4\">941 1436 1092 2190</td><td>622</td><td colspan=\"2\">484 377</td><td colspan=\"3\">3440 1117 1605</td><td>537</td></tr><tr><td>Lemmas</td><td>557</td><td colspan=\"4\">745 1133 1016 1774</td><td>509</td><td colspan=\"2\">356 279</td><td colspan=\"3\">2593 1019 1129</td><td>390</td></tr><tr><td>Entity IDs</td><td>404</td><td>562</td><td>796</td><td colspan=\"2\">764 1400</td><td>369</td><td colspan=\"2\">278 200</td><td>1669</td><td>668</td><td>833</td><td>270</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"text": "", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"text": "Cross-lingual entity linking.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"text": "F1-measure results for the COVID-19 corpus. novation programme under grants 770299 (News-Eye). Work on Slovene was financed through the European Union's Horizon 2020 Research and Innovation Programme under grant agreement No 825153, Project EMBEDDIA: Cross-Lingual Embeddings for Less-Represented Languages in European News Media, as well as Slovenian Research Agency's project: Computer-assisted multilingual news discourse analysis with contextual embeddings (CANDAS, J6-2581).", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>USA 2020 ELECTIONS</td><td/><td/><td/><td/><td>Language</td><td/><td/></tr><tr><td>Phase</td><td>Metric</td><td>bg</td><td>cs</td><td>pl</td><td>ru</td><td>sl</td><td>uk</td></tr><tr><td>Recognition</td><td>Relaxed</td><td>L3i</td><td colspan=\"3\">89.8 UWr-VL 91.3 Priberam 92.3 L3i</td><td colspan=\"2\">83.7 Priberam 91.5 TLD</td><td>84.6</td></tr><tr><td/><td>Partial</td><td colspan=\"3\">Priberam 88.7 Priberam 90.7 L3i</td><td>92.0 Priberam</td><td>83.4 L3i</td><td>91.5 Priberam</td><td>84.6</td></tr><tr><td/><td/><td colspan=\"2\">TraSpaS 88.1 L3i</td><td>90.2 TLD</td><td>90.8 TraSpaS</td><td colspan=\"2\">81.5 UWr-VL 90.4 L3i</td><td>84.5</td></tr><tr><td/><td/><td colspan=\"2\">UWr-VL 87.3 TLD</td><td colspan=\"2\">88.5 UWr-VL 89.8 TLD</td><td>80.9 TLD</td><td>89.8 TraSpaS</td><td>83.3</td></tr><tr><td/><td/><td>TLD</td><td>87.3 UL FRI</td><td colspan=\"2\">88.4 TraSpaS 89.2 UL FRI</td><td colspan=\"2\">80.5 TraSpaS 89.4 UWr-VL</td><td>83.3</td></tr><tr><td/><td/><td>UL FRI</td><td colspan=\"2\">86.9 TraSpaS 87.8 UL FRI</td><td>89.1 UWr-VL</td><td>77.2 UL FRI</td><td>88.6 UL FRI</td><td>83.2</td></tr><tr><td/><td/><td>PAISC</td><td>83.6 PAISC</td><td>82.6 PAISC</td><td>66.4 PAISC</td><td>77.1 PAISC</td><td>86.0 PAISC</td><td>77.0</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">CTC-NER 75.4</td><td colspan=\"2\">CTC-NER 71.1</td></tr><tr><td>Normalization</td><td/><td colspan=\"4\">UWr-VL 51.3 UWr-VL 51.9 UWr-VL 62.1 TraSpaS</td><td colspan=\"2\">50.7 UWr-VL 62.4 UL FRI</td><td>56.9</td></tr><tr><td/><td/><td>UL FRI</td><td colspan=\"2\">21.9 TraSpaS 42.0 TLD</td><td>51.0 UL FRI</td><td>48.8 UL FRI</td><td>43.9 TraSpaS</td><td>56.8</td></tr><tr><td/><td/><td>TLD</td><td>19.1 TLD</td><td>40.1 UL FRI</td><td>50.1 TLD</td><td colspan=\"2\">46.5 TraSpaS 34.2 TLD</td><td>55.3</td></tr><tr><td/><td/><td colspan=\"2\">TraSpaS 17.9 UL FRI</td><td colspan=\"3\">39.7 TraSpaS 42.4 CTC-NER 44.8 TLD</td><td colspan=\"2\">31.9 CTC-NER 36.9</td></tr><tr><td/><td/><td>Priberam</td><td>0.0 Priberam</td><td>0.0 Priberam</td><td>0.0 UWr-VL</td><td>25.6 Priberam</td><td>0.0 UWr-VL</td><td>26.5</td></tr><tr><td/><td/><td>L3i</td><td>0.0 L3i</td><td>0.0 L3i</td><td>0.0 Priberam</td><td>0.0 L3i</td><td>0.0 Priberam</td><td>0.0</td></tr><tr><td/><td/><td>PAISC</td><td>0.0 PAISC</td><td>0.0 PAISC</td><td>0.0 L3i</td><td>0.0 PAISC</td><td>0.0 L3i</td><td>0.0</td></tr><tr><td/><td/><td/><td/><td/><td>PAISC</td><td>0.0</td><td>PAISC</td><td>0.0</td></tr><tr><td>Entity linking</td><td colspan=\"5\">Document UWr-VL 63.7 UWr-VL 64.3 UWr-VL 67.1 TLD</td><td colspan=\"2\">44.8 UWr-VL 67.3 UWr-VL</td><td>58.9</td></tr><tr><td/><td>level</td><td>TLD</td><td>58.7 TLD</td><td>55.3 TLD</td><td>62.3 UWr-VL</td><td>35.8 TLD</td><td>59.3 TLD</td><td>52.2</td></tr><tr><td/><td/><td colspan=\"2\">Priberam 12.5 UL FRI</td><td>37.5 UL FRI</td><td>44.9 UL FRI</td><td>32.2 UL FRI</td><td>43.3 UL FRI</td><td>28.8</td></tr><tr><td/><td/><td>L3i</td><td>12.1 L3i</td><td colspan=\"2\">30.5 Priberam 18.2 Priberam</td><td>12.3 L3i</td><td>18.3 Priberam</td><td>25.4</td></tr><tr><td/><td/><td colspan=\"3\">TraSpaS 11.7 Priberam 29.5 L3i</td><td>18.0 L3i</td><td colspan=\"2\">12.3 Priberam 17.9 L3i</td><td>23.9</td></tr><tr><td/><td/><td>PAISC</td><td colspan=\"3\">11.4 TraSpaS 28.6 TraSpaS 17.4 PAISC</td><td colspan=\"2\">9.9 TraSpaS 17.1 TraSpaS</td><td>23.5</td></tr><tr><td/><td/><td>UL FRI</td><td>4.5 PAISC</td><td>21.6 PAISC</td><td>13.4 TraSpaS</td><td>9.8 PAISC</td><td>15.8 
PAISC</td><td>16.8</td></tr><tr><td/><td/><td/><td/><td/><td>CTC-NER</td><td>2.8</td><td>CTC-NER</td><td>1.5</td></tr><tr><td/><td>Single</td><td colspan=\"2\">UWr-VL 68.5 TLD</td><td>69.0 TLD</td><td>74.9 TLD</td><td>50.1 TLD</td><td>68.7 TLD</td><td>62.2</td></tr><tr><td/><td>language</td><td>TLD</td><td colspan=\"3\">67.1 UWr-VL 66.0 UWr-VL 69.9 UWr-VL</td><td colspan=\"2\">39.3 UWr-VL 66.5 UWr-VL</td><td>52.9</td></tr><tr><td/><td/><td>PAISC</td><td>12.8 UL FRI</td><td>50.0 UL FRI</td><td>37.7 UL FRI</td><td>13.6 UL FRI</td><td>21.3 UL FRI</td><td>23.0</td></tr><tr><td/><td/><td colspan=\"2\">Priberam 10.1 L3i</td><td colspan=\"2\">18.1 Priberam 14.8 Priberam</td><td>5.6 Priberam</td><td>8.4 TraSpaS</td><td>21.4</td></tr><tr><td/><td/><td>TraSpaS</td><td colspan=\"2\">8.6 Priberam 17.7 L3i</td><td>14.5 L3i</td><td>5.5 L3i</td><td>8.3 L3i</td><td>20.5</td></tr><tr><td/><td/><td>L3i</td><td colspan=\"3\">8.6 TraSpaS 17.7 TraSpaS 13.4 TraSpaS</td><td>5.1 TraSpaS</td><td>8.2 Priberam</td><td>20.2</td></tr><tr><td/><td/><td>UL FRI</td><td>8.3 PAISC</td><td>14.1 PAISC</td><td>10.7 PAISC</td><td>4.4 PAISC</td><td>7.2 PAISC</td><td>12.9</td></tr><tr><td/><td/><td/><td/><td/><td>CTC-NER</td><td>3.6</td><td>CTC-NER</td><td>9.4</td></tr></table>" |
|
}, |
|
"TABREF9": { |
|
"num": null, |
|
"text": "Evaluation results (F1-measure) for the USA 2020 ELECTION corpus.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td/><td/><td colspan=\"2\">COVID-19</td><td/><td/><td/><td colspan=\"4\">USA 2020 ELECTIONS</td><td/></tr><tr><td>bg</td><td>cs</td><td>pl</td><td>ru</td><td>sl</td><td>uk</td><td>bg</td><td>cs</td><td>pl</td><td>ru</td><td>sl</td><td>uk</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |