|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:04:58.256539Z" |
|
}, |
|
"title": "TermEval 2020: Shared Task on Automatic Term Extraction Using the Annotated Corpora for Term Extraction Research (ACTER) Dataset", |
|
"authors": [ |
|
{ |
|
"first": "Ayla", |
|
"middle": [], |
|
"last": "Rigouts Terryn", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Veronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Drouin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universit\u00e9 de Montr\u00e9al", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Els", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The TermEval 2020 shared task provided a platform for researchers to work on automatic term extraction (ATE) with the same dataset: the Annotated Corpora for Term Extraction Research (ACTER). The dataset covers three languages (English, French, and Dutch) and four domains, of which the domain of heart failure was kept as a held-out test set on which final f1-scores were calculated. The aim was to provide a large, transparent, qualitatively annotated, and diverse dataset to the ATE research community, with the goal of promoting comparative research and thus identifying strengths and weaknesses of various state-of-the-art methodologies. The results show a lot of variation between different systems and illustrate how some methodologies reach higher precision or recall, how different systems extract different types of terms, how some are exceptionally good at finding rare terms, or are less impacted by term length. The current contribution offers an overview of the shared task with a comparative evaluation, which complements the individual papers by all participants.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The TermEval 2020 shared task provided a platform for researchers to work on automatic term extraction (ATE) with the same dataset: the Annotated Corpora for Term Extraction Research (ACTER). The dataset covers three languages (English, French, and Dutch) and four domains, of which the domain of heart failure was kept as a held-out test set on which final f1-scores were calculated. The aim was to provide a large, transparent, qualitatively annotated, and diverse dataset to the ATE research community, with the goal of promoting comparative research and thus identifying strengths and weaknesses of various state-of-the-art methodologies. The results show a lot of variation between different systems and illustrate how some methodologies reach higher precision or recall, how different systems extract different types of terms, how some are exceptionally good at finding rare terms, or are less impacted by term length. The current contribution offers an overview of the shared task with a comparative evaluation, which complements the individual papers by all participants.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automatic Term Extraction (ATE) can be defined as the automated process of identifying terminology from a corpus of specialised texts. Despite receiving plenty of research attention, it remains a challenging task, not in the least because terms are so difficult to define. Terms are typically described as \"lexical items that represent concepts of a domain\" (Kageura and Marshman, 2019) , but such definitions leave room for many questions about the fundamental nature of terms. Since ATE is supposed to automatically identify terms from specialised text, the absence of a consensus about the basic characteristics of terms is problematic. The disagreement covers both practical aspects, such as term length and part-of-speech (POS) pattern, and theoretical considerations about the difference between words (or collocations/phrases) and terms. This poses great difficulties for many aspects of ATE, from data collection, to extraction methodology, to evaluation. Data collection, i.e. creating domain-specific corpora in which terms have been annotated, is time-and effortconsuming. When manual term annotation is involved, inter-annotator agreement is notoriously low and there is no consensus about an annotation protocol (Estop\u00e0, 2001) . This leads to a scarcity in available resources. Moreover, it means that the few available datasets are difficult to combine and compare, and often cover only a single language and domain. While the manual annotation bottleneck has often been circumvented by starting from existing resources, such as ontologies or terminological databases, specialised dictionaries, or book indexes, such strategies do not have the same advantages as manual annotation and will rarely cover all terms in an entire corpus. This is linked to the evaluation of ATE, for which the accepted metrics are precision (how many of the extracted terms are correct), recall (how many of the terms in the text have correctly been extracted), and f1-score (harmonic mean of the two). To calculate recall (and, therefore, also f1-score), it is necessary to know all true terms in a text. Since manual annotation is such an expensive operation, and relatively few resources are currently available, evaluation is often limited to either a single resource, or the calculation of precision. The ATE methodology itself, most notably the types of terms a system is designed to find, is impacted as well. Some of the most fundamental differences are term length (in number of tokens), term POS-pattern (sometimes only nouns and noun phrases, sometimes adjectives, adverbs, and verbs are included), and minimum term frequency. Differences which are more difficult to quantify are, for instance, how specialised or domain-specific a lexical unit needs to be before it is considered a term. These three aspects are closely related, since different systems and evaluation methods will be suited for different datasets. This combination of difficulties creates a hurdle for clear, comparative research. All of this can slow down the advance of ATE, especially now that (supervised) machine learning techniques are becoming more popular for the task. The TermEval shared task on ATE, using the ACTER Annotated Corpora for Term Extraction Research, was designed to lower these hurdles. 
The ACTER dataset contains specialised corpora in three languages (English, French, and Dutch) , and four domains (corruption, dressage (equitation), heart failure, and wind energy), which have been meticulously, manually annotated according to transparent guidelines. Both the texts and the annotations have been made freely available. The current version of the dataset presents the annotations as unstructured lists of all unique annotated terms (one term and its label per line), rather than providing the span of each occurrence of annotated terms in their context (which may be provided in future releases). The shared task brought together researchers to work on ATE with the same data and evaluation setup. It allowed a detailed comparison of dif-ferent methodologies. Standard evaluation methods (precision, recall, f1-score) were used for the basic evaluation and ranking; these are elaborated with more detailed evaluations as presented both in the current overview paper and in participants' contributions. The following sections start with a brief overview of current datasets and methodologies for ATE. In section 3, the ACTER dataset is described in some detail. The fourth section contains an overview of the shared task itself and the results. The final section is dedicated to a discussion and the conclusions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 386, |
|
"text": "(Kageura and Marshman, 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1225, |
|
"end": 1239, |
|
"text": "(Estop\u00e0, 2001)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 3350, |
|
"end": 3378, |
|
"text": "(English, French, and Dutch)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
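The precision, recall, and f1-score definitions in the preceding paragraph can be made concrete with a short sketch. This is not the official TermEval evaluation script; the file names are hypothetical, and exact string matching over lowercased unique terms is assumed, in line with the flat-list gold-standard format described for ACTER.

```python
# Minimal sketch of precision/recall/f1 for term extraction, assuming both the
# system output and the gold standard are flat lists of unique lowercased terms
# (one term per line), as described for the ACTER dataset.

def precision_recall_f1(extracted, gold):
    """Compute precision, recall, and f1-score over sets of term strings."""
    extracted, gold = set(extracted), set(gold)
    true_positives = len(extracted & gold)
    precision = true_positives / len(extracted) if extracted else 0.0
    recall = true_positives / len(gold) if gold else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall else 0.0)
    return precision, recall, f1


def load_terms(path):
    """Read one term per line, lowercased; empty lines are ignored."""
    with open(path, encoding="utf-8") as f:
        return {line.strip().lower() for line in f if line.strip()}


if __name__ == "__main__":
    # Hypothetical file names, for illustration only.
    p, r, f1 = precision_recall_f1(load_terms("system_output.txt"),
                                   load_terms("gold_terms.txt"))
    print(f"precision={p:.3f} recall={r:.3f} f1={f1:.3f}")
```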
|
{ |
|
"text": "Two of the most commonly used annotated datasets are GE-NIA (Kim et al., 2003) , and the ACL RD-TEC 2.0 (Qasemizadeh and Schumann, 2016), both of which are in English. GENIA is a collection of 2000 abstracts from the MEDLINE database in the domain of bio-medicine, specifically \"transcription factors in human blood cells\". Over 400k tokens were annotated by two domain experts to obtain 93,293 term annotations. The ACL-RD-TEC 2.0 contains 300 annotated abstracts from the ACL Anthology Reference Corpus. Again, two experts performed the annotation of 33k tokens, which resulted in 6818 term annotations. They claim three main advantages over GENIA: first, the domain (computational linguistics) means that ATE researchers will have a better understanding of the material. Second, the ACL RD-TEC corpus covers three decades, which allows some research of the evolution of terms. Third and finally, the annotation is more transparent, with freely available annotation guidelines and the possibility to download the annotations of both experts separately. There are other examples as well, such as the CRAFT corpus, another English corpus in the biomedical domain (99,907 annotations over 560k tokens) (Bada et al., 2012) , an English automotive corpus (28,656 annotations over 224,159 tokens) (Bernier-Colborne, 2012; Bernier-Colborne and Drouin, 2014), a diachronical English corpus on mechanical engineering (+10k annotations over 140k words) (Schumann and Fischer, 2016) , the TermITH French corpus on language sciences (14,544 unique validated terms found over 397,695 words) (TermITH, 2014; Billami et al., 2014) , a small German corpus on DIY, cooking, hunting and chess which focused on inter-annotator agreement between laypeople (912 annotations on which at least 5 out of 7 annotators agreed, over 3075 words) (H\u00e4tty and Schulte im Walde, 2018b) and, within the framework of the TTC project (Loginova et al., 2012) , lists of 107-159 annotated terms in corpora in seven languages and two domains (wind energy and mobile technology). While this is a non-exhaustive list, it illustrates an important and logical trend: either the created gold standard is quite large, with over 10k annotations, or it covers multiple languages and/or domains. While this is not necessarily problematic, the annotation guidelines for all of these corpora differ, and, therefore, the annotations themselves as well. That does create difficulties, since comparing ATE performance on multiple cor-pora will not necessarily reflect differences in performance between domains or languages, but may also show the contrast between the different annotation styles. The differences can be quite substantial, e.g. in GENIA and ACL RD-TEC, nested annotations are not allowed, in CRAFT they are only allowed under certain conditions, while in the TermITH project they are allowed in most cases. Moreover, it is important to note that the annotations of both the TermITH project and the TTC project are based on the manual annotation of ATE results, rather than manual annotations in the unprocessed text. A final remark is that some corpora have been annotated with multiple term labels or have even been annotated according to large taxonomies, while others don't make any distinctions beyond terms. As will be discussed in more detail in section 3, the ACTER dataset has been specifically designed to deal with some of the issues addressed here.", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 78, |
|
"text": "(Kim et al., 2003)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1201, |
|
"end": 1220, |
|
"text": "(Bada et al., 2012)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1445, |
|
"end": 1473, |
|
"text": "(Schumann and Fischer, 2016)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1580, |
|
"end": 1595, |
|
"text": "(TermITH, 2014;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 1596, |
|
"end": 1617, |
|
"text": "Billami et al., 2014)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1820, |
|
"end": 1855, |
|
"text": "(H\u00e4tty and Schulte im Walde, 2018b)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1901, |
|
"end": 1924, |
|
"text": "(Loginova et al., 2012)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Manually Annotated Gold Standards for ATE", |
|
"sec_num": "2.1." |
|
}, |
|
{ |
|
"text": "Traditionally, three types of ATE methodologies are identified: linguistic (relying on linguistic information, such as POS-patterns and chunking), statistical (using frequencies, often compared to a reference corpus, to calculate termhood and unithood (Kageura and Umino, 1996) ), and hybrid methods (which combine the two). It has been established for some time that hybrid methods appear to outperform the other two (Macken et al., 2013) . These methods typically select candidate terms based on their POS-pattern and rank these candidate terms using the statistical metrics, thus combining the advantages of both techniques. A particular difficulty is defining the cut-off threshold for the term candidates, which can be defined as the top-n terms, the top-n percentage of terms, or all terms above a certain threshold score. Manually predicting the ideal cut-off point is extremely difficult and can result in a skew towards either precision or recall, which can be detrimental to the final f1score (Rigouts Terryn et al., 2019a) . While this typology of linguistic, statistical, and hybrid systems is sometimes still used today, in recent years, the advance of machine learning techniques has made such a simple classification of ATE methodologies more complicated (Gao and Yuan, 2019) . Methodologies have become so diverse that they are no longer easily captured in such a limited number of clearly delineated categories. For instance, apart from the distinction between statistical and linguistic systems, one could also distinguish between rulebased methods and machine learning methods. However, rather than a simple binary distinction, there is quite a range of options: methods that rely on a single statistical score (Drouin, 2003; Kosa et al., 2020) , systems that combine a limited number of features with a voting algorithm (Fedorenko et al., 2013; Vivaldi and Rodr\u00edguez, 2001) , an evolutionary algorithm that optimises the ROC-curve (Az\u00e9 et al., 2005) , rule-induction (Foo and Merkel, 2010), supportvector models (Ramisch et al., 2010) , logistic regression (Bolshakova et al., 2013; Judea et al., 2014) , basic neural networks (H\u00e4tty and Schulte im Walde, 2018a), recursive neural networks (Kucza et al., 2018) , siamese neural networks (Shah et al., 2019) , and convolutional neural networks (Wang et al., 2016) . Within the machine learn-ing systems, there are vast differences between supervised, semi-supervised, and unsupervised systems, as well as the distinction between sequence labelling approaches and systems that start from a limited list of unique term candidates. Splitting systems by their features is perhaps even more difficult, since research has moved far beyond using simple linguistic and statistical features. Some examples include the use of topic modelling (\u0160ajatovi\u0107 et al., 2019; Bolshakova et al., 2013) , queries on search engines, Wikipedia, or other external resources (Kessler et al., 2019; Vivaldi and Rodr\u00edguez, 2001) , and word embeddings (Amjadian et al., 2016; Kucza et al., 2018; Qasemizadeh and Handschuh, 2014; Pollak et al., 2019) . Some methods are even called \"featureless\" (Gao and Yuan, 2019; Wang et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 252, |
|
"end": 277, |
|
"text": "(Kageura and Umino, 1996)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 439, |
|
"text": "(Macken et al., 2013)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1003, |
|
"end": 1033, |
|
"text": "(Rigouts Terryn et al., 2019a)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1270, |
|
"end": 1290, |
|
"text": "(Gao and Yuan, 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1730, |
|
"end": 1744, |
|
"text": "(Drouin, 2003;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1745, |
|
"end": 1763, |
|
"text": "Kosa et al., 2020)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1840, |
|
"end": 1864, |
|
"text": "(Fedorenko et al., 2013;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1865, |
|
"end": 1893, |
|
"text": "Vivaldi and Rodr\u00edguez, 2001)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 1951, |
|
"end": 1969, |
|
"text": "(Az\u00e9 et al., 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 2032, |
|
"end": 2054, |
|
"text": "(Ramisch et al., 2010)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2077, |
|
"end": 2102, |
|
"text": "(Bolshakova et al., 2013;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 2103, |
|
"end": 2122, |
|
"text": "Judea et al., 2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 2210, |
|
"end": 2230, |
|
"text": "(Kucza et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 2257, |
|
"end": 2276, |
|
"text": "(Shah et al., 2019)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 2313, |
|
"end": 2332, |
|
"text": "(Wang et al., 2016)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 2801, |
|
"end": 2825, |
|
"text": "(\u0160ajatovi\u0107 et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2826, |
|
"end": 2850, |
|
"text": "Bolshakova et al., 2013)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 2919, |
|
"end": 2941, |
|
"text": "(Kessler et al., 2019;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 2942, |
|
"end": 2970, |
|
"text": "Vivaldi and Rodr\u00edguez, 2001)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 2993, |
|
"end": 3016, |
|
"text": "(Amjadian et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 3017, |
|
"end": 3036, |
|
"text": "Kucza et al., 2018;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 3037, |
|
"end": 3069, |
|
"text": "Qasemizadeh and Handschuh, 2014;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 3070, |
|
"end": 3090, |
|
"text": "Pollak et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 3136, |
|
"end": 3156, |
|
"text": "(Gao and Yuan, 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 3157, |
|
"end": 3175, |
|
"text": "Wang et al., 2016)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ATE", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "There are many more ways in which ATE systems can vary. Some can already be deduced from the ways in which the datasets are annotated, such as support for nested terms. Another very fundamental difference is the frequency cutoff: many ATE systems only extract terms which appear above a certain frequency threshold in the corpora. This threshold is extremely variable, with some systems that do not have any threshold, others that only extract candidate terms which appear 15 times or more (Pollak et al., 2019) , and still others where only the top-n most frequent terms are extracted (Loukachevitch, 2012). Term length is similarly variable, with systems that don't place any restrictions, others that extract only single-word terms, only multi-word terms, or those that extract all terms between 1 and n tokens (with n ranging from 2 to 15), where n is sometimes determined by the restrictions of a system, sometimes experimentally set to an optimal value, and at other times directly determined by the maximum term length in a gold standard. There are many other possible differences, such as POS patterns, which will not be discussed in any detail here. More information regarding both datasets for ATE and different ATE methodologies can be found in Rigouts Terryn et al. (2019b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 490, |
|
"end": 511, |
|
"text": "(Pollak et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1264, |
|
"end": 1285, |
|
"text": "Terryn et al. (2019b)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ATE", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "With such a great variety of methodologies, comparative research is essential to identify the strengths and weaknesses of the respective strategies. However, as discussed, appropriate datasets are scarce and often limited. This means that ATE systems are regularly scored solely on precision (or some variation thereof), since recall and f1-score cannot be calculated without knowing all true terms in a corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ATE", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "Considering the expense of data annotation, the extra effort required is rarely feasible. The strictness of the evaluation varies as well, such as determining how specialised a term candidate needs to be for it to be considered a true term, and validating only full matches or also partial ones. Moreover, scores for sequence labelling approaches are difficult to compare to scores for approaches that provide ranked lists of unique terms. There is even disagreement on the required expertise for annotators: do they need to be domain experts or terminologists? This disparity does not only make comparisons between systems highly problematic, it also means that many systems are evaluated on only a single domain (and language).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ATE", |
|
"sec_num": "2.2." |
|
}, |
|
{ |
|
"text": "ACTER is a collection of domain-specific corpora in which terms have been manually annotated. It covers three languages (English, French, and Dutch) and four domains (corruption, dressage (equitation), heart failure, and wind energy). It has been created in light of some of the perceived difficulties that have been mentioned. A previous version (which did not yet bear the ACTER acronym) has already been elaborately described (Rigouts Terryn et al., 2019b), so we refer the interested reader to this work for more detailed information. However, the current version of the dataset has been substantially updated since then, to be even more consistent. All previous annotations have been double-checked, inconsistent annotations were automatically found and manually edited when necessary, and, with this shared task, a first version has been made publicly available. Therefore, the remainder of this section will focus on the up-to-date statistics of version 1.2 of the ACTER dataset (version 1.0 was the first to appear online for the shared task). The annotation guidelines have been updated as well and are freely available 1 . Discontinuous terms (e.g. in ellipses) have been annotated, but are not yet included in ACTER 1.2, and neither are the cross-lingual annotations in the domain of heart failure. The changes made between ACTER versions are indicated in detail in the included README.md file and the biggest difference between version 1.0 and 1.2 (besides some 120 removed or added annotations) is the inclusion of the label of each annotation. The dataset contains trilingual comparable corpora in all domains: the corpora in the same domain are similar in terms of subject, style, and length for each language, but they are not translations (and, therefore, cannot be aligned). Additionally, for the domain of corruption, there is a trilingual parallel corpus of aligned translations. For each language and domain, around 50k tokens have been manually annotated (in the case of corruption, the annotations have only been made in the parallel corpus, so the comparable corpus on corruption is completely unannotated). In all domains except heart failure, the complete corpora are larger than only the annotated parts, and unannotated texts are included (separately) as well. The texts are all plain text files and the sources have been included in the downloadable version. The annotations have been performed in the BRAT annotation tool (Stenetorp et al., 2011) , but they are currently provided as flat lists with one term per line. The annotations have all been performed by a single annotator with experience in the field of terminology and ATE and fluent in all three languages. However, she is not a domainexpert, except in the domain of dressage. Multiple semiautomatic checks have been performed to ensure the best possible annotation quality and inter-annotator agreement studies were performed and published (Rigouts Terryn et al., 2019b) to further validate the dataset. Furthermore, the elaborate guidelines helped the annotator to make consistent decisions and make the entire process more transparent. Nevertheless, term annotation remains an ambiguous bioprosthetic valve replacement Specific Term biopsies Common Term biopsy Common Term biosynthetic enzymes Specific Term bisoprolol Specific Term bisphosphonates Specific Term Table 1 : Sample of one of the gold standard term lists in the ACTER 1.2 dataset to illustrate the format and subjective task. 
We do not claim that ours is the only possible interpretation and, therefore, when using ACTER for ATE evaluation purposes, always recommend checking the output for a more nuanced evaluation (e.g. Rigouts Terryn et al. (2019a)). While ATE for TermEval has been perceived as a binary task (term or not), the original annotations included four different labels. There are three term labels, for which terms are defined by their degree of domain-specificity (are they relevant to the domain) and lexicon-specificity (are they known only by experts, or by laypersons as well).", |
|
"cite_spans": [ |
|
{ |
|
"start": 2452, |
|
"end": 2476, |
|
"text": "(Stenetorp et al., 2011)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 2941, |
|
"end": 2962, |
|
"text": "Terryn et al., 2019b)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3213, |
|
"end": 3376, |
|
"text": "Specific Term biopsies Common Term biopsy Common Term biosynthetic enzymes Specific Term bisoprolol Specific Term bisphosphonates Specific Term Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "ACTER Annotated Corpora for Term Extraction Research", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "The three term labels defined this way are: Specific Terms (which are both domain-and lexicon-specific), Common Terms (domain-specific, not lexicon-specific), and Outof-Domain (OOD) Terms (not domain-specific, lexiconspecific). In the domain of heart failure, for instance, ejection fraction might be a Specific Term: laypersons generally do not know what it means, and it is strongly related to the domain of heart failure, since it is an indication of the volume of blood the heart pumps on each contraction. Heart is an example of a Common Term: it is clearly domain-specific to heart failure and you do not need to be an expert to have a basic idea of what a heart is. An example of an OOD term might be p-value, which is lexicon-specific since you need some knowledge of statistics to know the term, but it is not domain-specific to heart failure. In addition to these three term labels, Named Entities (proper names of persons, organisations, etc.) were annotated as well, as they share a few characteristics with terms: they will appear more often in texts with a relevant subject (e.g. brand names of medicine in the field of heart failure) and, like multi-word terms, have a high degree of unithood (internal cohesion). Labelling these does not mean we consider them to be terms, but it offers more options for the evaluation and training based on the dataset. Since TermEval was set up as a binary task, all three term labels were combined and considered as true terms. There were two separate datasets regarding the Named Entities: one including both terms and Named Entities, one with only terms. All participating systems were evaluated on both datasets. Moreover, while the evaluation for the ranking of the participating systems was based only on these two binary interpretations, the four labels were made available afterwards for a more detailed evaluation of the results. The gold standard lists of terms were ordered alphabetically, so with no relation to their labels or degree of termhood. Table 1 shows a sample of such a gold standard list, with one unique term per line followed by its label. Tables 2 and 3 provide more details on ACTER 1.2. Ta-ble 2 shows the number of documents and words per corpus, both in the entire corpus and only the annotated part of the corpus. Table 3 provides details on the number of annotations per corpus, counting either all annotations or all unique annotations. In total, 119,455 term and Named Entity annotations have been made over 596,058 words, resulting in 19,002 unique annotations. As can be seen, the number of annotations within a domain is usually somewhat similar for all languages (since the corpora are comparable), with larger differences between the domains. Version 1.2 of ACTER only provides a list of all unique lowercased terms (and Named Entities) per corpus. The aim is to release future versions with all in-text annotation spans, where every occurrence of each term is annotated, so that it can be used for sequence-labelling approaches as well. It is important to note that, since the annotation process was completely manual, each occurrence of a term was evaluated separately. When a lexical unit was only considered a term in some contexts, it was only annotated in those specific contexts. For instance, the word sensitivity can be used in general language, where it will not be annotated, but also as a synonym of recall (true positive rate), in which case it was annotated as a term. 
Additional characteristics to bear in mind about these annotations are that nested annotations are allowed (as long as the nested part can be used as a term on its own), and that there were no restrictions on term length, term frequency, or term POS-pattern (apart from the condition that terms had to contain a content word). If a lexical unit was used as a term in the text, it was annotated, even if it was not the best or most frequently used term for a certain concept. The reasoning behind this strategy was that one of the most important applications of ATE is to be able to keep up with fast-evolving terminology in increasingly more specialised domains. If only well-established, frequent terms are annotated, the rare and/or new terms will be ignored, even though these could be particularly interesting for ATE. While these qualities were all chosen to best reflect the desired applications for ATE, they do result in a particularly difficult dataset for ATE, so f1-scores for ATE systems tested on ACTER are expected to be rather modest in comparison to some other datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2011, |
|
"end": 2018, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2117, |
|
"end": 2131, |
|
"text": "Tables 2 and 3", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 2167, |
|
"end": 2175, |
|
"text": "Ta-ble 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 2297, |
|
"end": 2304, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "ACTER Annotated Corpora for Term Extraction Research", |
|
"sec_num": "3." |
|
}, |
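The collapsing of the four annotation labels into the two binary gold standards described above (terms only, and terms plus Named Entities) can be sketched as follows. This is an illustration only: a tab-separated gold standard file with one lowercased annotation per line followed by its label is assumed, and the exact label spellings in the released files may differ from the names used in the paper.

```python
import csv

# Labels as named in the paper; the spellings in the released files are an assumption.
TERM_LABELS = {"Specific Term", "Common Term", "OOD Term"}
NAMED_ENTITY_LABEL = "Named Entity"


def load_annotations(path):
    """Yield (term, label) pairs from a tab-separated gold standard file."""
    with open(path, encoding="utf-8", newline="") as f:
        for row in csv.reader(f, delimiter="\t"):
            if len(row) >= 2:
                yield row[0].strip().lower(), row[1].strip()


def binary_gold_standards(path):
    """Return the two TermEval gold standards: terms only, and terms + Named Entities."""
    terms_only, terms_and_nes = set(), set()
    for term, label in load_annotations(path):
        if label in TERM_LABELS:
            terms_only.add(term)
            terms_and_nes.add(term)
        elif label == NAMED_ENTITY_LABEL:
            terms_and_nes.add(term)
    return terms_only, terms_and_nes


# Usage (hypothetical file name):
# terms, terms_nes = binary_gold_standards("heart_failure_en_annotations.tsv")
```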
|
{ |
|
"text": "The aim of the TermEval shared task was to provide a platform for researchers to work on the same task, with the same data, so that different methodologies for ATE can easily be compared and current strengths and weaknesses of ATE can be identified. During the training phase, participants all received the ACTER dataset as described in the previous section, with all domains apart from heart failure. The latter is provided during the final phase as the test set on which the scores are calculated. As described in the previous section, ACTER 1.2 consists of flat lists of unique terms per corpus, with one term per line. Since this first version of the shared task aims to focus on ATE in general, rather than term variation, all terms are lowercased, and only identical lowercased terms are merged in a single entry, without lemmatisation. Even when terms acquire . We do not discount the importance of ATE systems that handle term variation, but a choice was made to focus on the core task for the first edition of the task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "There are three different tracks (one per language) and participants could enter in one or multiple tracks. When participants submitted their final results on the test data (as a flat list of unique lowercased terms, like the training data), f1-scores were calculated twice: once compared to the gold standard with only terms, once compared to the gold standard with both terms and Named Entities. These double scores did not influence the final ranking based on f1-scores. The dataset has been used for more detailed evaluations as well (see section 4.3) and participants were encouraged to report scores on the training domains in their own papers as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "Five teams participated in the shared task: TALN-LS2N (Hazem et al., 2020) , RACAI (Pais and Ion, 2020) , e-Terminology (Oliver and V\u00e0zquez, 2020) , NLPLab UQAM (no system description paper), and NYU (no system description paper but based on previous work in Meyers et al. (2018) ). NYU and RACAI participated only in the English track, TALN-LS2N participated in both the English and French tracks, and e-Terminology and NLPLab UQAM participated in all tracks. We refer to their own system description papers for more details, but will provide a short summary of each of their methodologies. Team NYU has applied an updated version of Termolator (Meyers et al., 2018) . Candidate terms are selected based on \"terminological chunking and abbreviations\". The terminological chunking focuses, among others, on nominalisations, out-of-vocabulary words, and technical adjectives (based on suffixes) to find terms. Constructions where full forms are followed by their abbreviations are also taken into account. Next, three distributional metrics (e.g. TFIDF) are combined with equal weights and a \"well-formedness score\" is calculated, using mainly linguistic and morphological information. Additionally, a relevance score is based on the results of an online search engine. The final selection of candidate terms is made based on the product of these three metrics. Due to the timing of the shared task, Termolator was not specifically tuned to the ACTER dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 74, |
|
"text": "(Hazem et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 83, |
|
"end": 103, |
|
"text": "(Pais and Ion, 2020)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 146, |
|
"text": "(Oliver and V\u00e0zquez, 2020)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 279, |
|
"text": "Meyers et al. (2018)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 667, |
|
"text": "(Meyers et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participants", |
|
"sec_num": "4.2." |
|
}, |
|
{ |
|
"text": "Team e-Terminology uses the TSR (Token Slot Recognition) technique, implemented in TBXTools (Oliver and Vazquez, 2015; V\u00e0zquez and Oliver, 2018) . For Dutch, the statistical version of TBXTools is employed, for English and French the linguistic version is used. Stopwords are filtered out and all candidate terms that appear below a frequency threshold of two. As a terminological reference for each language (required for the TSR technique), the IATE database for 12-Law was chosen.", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 118, |
|
"text": "(Oliver and Vazquez, 2015;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 119, |
|
"end": 144, |
|
"text": "V\u00e0zquez and Oliver, 2018)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participants", |
|
"sec_num": "4.2." |
|
}, |
|
{ |
|
"text": "Team RACAI uses a combination of statistical approaches, such as an improved TextRank (Zhang et al., 2018) , TFIDF, clustering, and termhood features. Algorithms were adapted where possible to make use of pre-trained word embeddings and the result was generated using several voting and combinatorial approaches. Special attention is also paid to the detection of nested terms. Team TALN-LS2N uses BERT as a binary classification model for ATE. The model's input is represented as the concatenation of a sentence and a selected n-gram within the sentence. If the n-gram is a term, the input is labelled as positive training example. If not, a corresponding negative example is generated. Team NLPLab UQAM applied a bidirectional LSTM. Pre-trained GloVe word embedding were used to train a neural network-based model on the training corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 106, |
|
"text": "(Zhang et al., 2018)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participants", |
|
"sec_num": "4.2." |
|
}, |
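TALN-LS2N's approach, as summarised above, frames ATE as binary classification over sentence/candidate n-gram pairs fed to BERT. The snippet below only illustrates how such a pair can be encoded and classified with the Hugging Face transformers library; it is a sketch, not the participants' implementation, and the checkpoint name, the label convention, and the (omitted) fine-tuning step on pairs built from the training domains are assumptions.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Assumed multilingual BERT checkpoint; any BERT-style model could be substituted.
MODEL_NAME = "bert-base-multilingual-cased"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# num_labels=2: term vs. not a term; the classification head still needs fine-tuning
# before its predictions are meaningful.
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)

sentence = "Ejection fraction is reduced in patients with heart failure."
candidate = "ejection fraction"  # candidate n-gram taken from the sentence

# Encode as a sentence pair: [CLS] sentence [SEP] candidate [SEP].
inputs = tokenizer(sentence, candidate, truncation=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
is_term = logits.argmax(dim=-1).item() == 1  # assumed convention: label 1 = term
print(is_term)
```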
|
{ |
|
"text": "Precision, recall, and f1-scores were calculated both including and excluding Named Entities, for each team in all tracks. The scores and resulting ranking are presented in Table 3 . As can be seen, TALN-LS2N's system outperforms all others in the English and French tracks. NLPLab UQAM's system outperforms e-Terminology for the Dutch track (though their respective rankings for English and Dutch are reversed). Scores with and without Named Entities are usually very similar (average difference of one percentage point), with e-Terminology and NYU scoring slightly better when Named Entities are excluded, and the others scoring better when they are included. On average, precision is higher than recall, especially when Named Entities are included. However, there is much variation. For instance, TALN-LS2N's English system obtains 36-40% more recall than precision (the difference is only 6-9% for their French system). Comparatively, e-Terminology obtains 20% higher precision than recall on average and NLPLab UQAM obtains more balanced precision and recall scores. The number of extracted term candidates varies greatly as well, from 744 (e-Terminology in Dutch), to 5267 (TALN-LS2N in English). Therefore, even though TALN-LS2N achieves the highest f1-scores thanks to great recall in English, their system also produces most noise, with 3435 false positives (including Named Entities). The average number of extracted candidate terms 2038is not too different from the average number of terms in the gold standard (2422 incl. Named Entities, 1720 without). Looking at performance of systems in multiple tracks, there does not appear to be one language that is inherently easier or more difficult. TALN-LS2N's best performance is reached for French, e-Terminology's for English, and NLPLab UQAM's for Dutch. As with many other task within natural language processing, the methodology based on the BERT transformer model appears to outperform other approaches. However, the large gap between precision and recall for the English model, which is much smaller for the French model, may be an indication of an often-cited downside of deep learning models: their unpredictability. For ATE, predictability is cited as at least as important as f1-scores: \"for ATE to be usable, its results should be consistent, predictable and transparent\" (Kageura and Marshman, 2019) . Additionally, it appears that neural networks and word embeddings do not always work for this task, as demonstrated by the fact that, for English and French, NLPLab UQAM's bidirectional LSTM approach with GLOVE embeddings is ranked last, below non-neural approaches such as NYU's.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2341, |
|
"end": 2369, |
|
"text": "(Kageura and Marshman, 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 180, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3." |
|
}, |
|
{ |
|
"text": "Apart from the ranking based on f1-scores, three different aspects of the results are analysed in more detail: composition of the output, recall of terms with different frequencies, and recall of terms with different lengths. Figure 1 shows the first of these, illustrating the composition of the gold standard regarding the four annotation labels, versus the true positives from each team. The results are averaged over all languages, as the differences between the languages were small. False positives were not included, since these can be deduced from the precision scores. The graphs are relative, so they do not represent the absolute number of annotations per type, only the proportions. The order of the teams is the order of their ranks for the English track. A first observation is that all teams seem to extract at least some Named Entities, except for e-Terminology. This may be partly due to their low recall, but since they did not extract a single Named Entity in any of the languages, it does appear that their system is most focused on terms. While the differences are never extreme, the various systems do show some variation in this respect. For instance, the two lowest ranked systems can be seen to extract relatively more Common Terms. This may be an indication that they are sensitive to frequency, as many of the Specific Terms are rarer (e.g., e-Terminology employs a frequency threshold of two). Conversely, NYU's system appears to excel at extracting these Specific Terms and also extracts relatively few Named Entities. The output of two top-scoring teams has a very similar composition to the gold standard, which Table 4 : Scores (as percentages) and rank for all teams per track Figure 1 : Proportion of Specific, Common, and OOD Terms, and Named Entities in the gold standard versus the true positives extracted by each team (averaged over all languages if teams participated in multiple tracks). may be part of the explanation for their high scores, and, in the case of TALN-LS2N's system, may be related to their reliance on the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 226, |
|
"end": 234, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1643, |
|
"end": 1650, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1710, |
|
"end": 1718, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3." |
|
}, |
|
{ |
|
"text": "A preference for Common Terms or Specific Terms can already give an indication of the system performance for rare terms, but we can also look directly at the recall of terms for various frequencies, as shown in Figure 2 . Here, the recall of all systems for various term frequencies is shown for the English track. Results for the other languages were similar, so will not be discussed separately. The dataset actually contains many hapax terms (which appear only once). In English, when Named Entities are included, there are 1121 (43%) hapax terms, 398 (15%) terms that appear twice, 220 (9%) terms that appear three times, 232 (9%) terms with a frequency between 4 and 5, 259 (10%) terms with a frequency between 5 and 10, 199 (8%) terms with a frequency between 10 and 25, and only 156 (6%) terms that appear more than 25 times. In line with previous findings on the difficulties of ATE, recall is lowest for hapax terms for all systems, and increases as frequency increases. Of course, e-Terminology has 0% recall for hapax terms due to the frequency cut-off, but the other systems also have difficulties. Notably, TALN-LS2N's system obtains a surprisingly stable recall for various frequencies and a very high recall of 64% for hapax terms. This is likely a consequence of the fact that they use none of the traditional statistical (frequency-related) metrics for ATE. Recall is almost always highest for the most frequent terms, though when looking at these frequent terms in more detail, recall appears to drop again for the most extreme cases (terms appearing over 100 times; not represented separately in Figure 2 ), presumably because these are more difficult to distinguish from common general language words.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 211, |
|
"end": 219, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 1615, |
|
"end": 1624, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3." |
|
}, |
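The frequency analysis described in the preceding paragraph can be reproduced with simple counting. A sketch under two assumptions: term frequency is approximated by naive lowercased substring counts in the test corpus, and the frequency bands only approximate those reported above.

```python
from collections import defaultdict


def frequency_band(freq):
    """Map a raw corpus frequency to a band, approximating the bands used above."""
    if freq <= 1:
        return "1"
    if freq == 2:
        return "2"
    if freq == 3:
        return "3"
    if freq <= 5:
        return "4-5"
    if freq <= 10:
        return "6-10"
    if freq <= 25:
        return "11-25"
    return ">25"


def recall_by_frequency(gold_terms, extracted_terms, corpus_text):
    """Recall of extracted_terms per frequency band of the gold-standard terms."""
    corpus = corpus_text.lower()
    extracted = {t.lower() for t in extracted_terms}
    found, total = defaultdict(int), defaultdict(int)
    for term in {t.lower() for t in gold_terms}:
        band = frequency_band(corpus.count(term))  # naive substring count
        total[band] += 1
        if term in extracted:
            found[band] += 1
    return {band: found[band] / total[band] for band in total}
```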
|
{ |
|
"text": "The final analysis concerns term length. Similarly to the analysis for frequency, Figure 3 presents recall for different term lengths per team, using the English data, including Named Entities, as a reference. The majority of gold standard terms are single-word terms (swts) (1170, or 45%), with frequencies decreasing as term length increases (800 or 31% 2-word terms (2wts), 376 or 15% 3wts, 144 or 6% 4wts, 40 or 2% 5wts, and 55 or 2% terms that are longer than 5 tokens. As can be seen in Figure 3 , two out of five teams (RACAI and NLPLab UQAM) have lower recall for 2wts than for swts, and, overall, recall decreases for terms with more than 3 tokens. TALN-LS2N extracts no terms beyond a length of 3 tokens at all, though this is different for their French system, where recall decreases more gradually with term length. NYU's system has a surprisingly stable performance for different term lengths, especially compared to TALN-LS2N and RACAI.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 90, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 501, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3." |
|
}, |
|
{ |
|
"text": "Five different teams submitted their results for the Ter-mEval shared task on ATE, based on the ACTER dataset. With the domains of corruption, dressage, and wind energy from the dataset as training data or simply as reference material, the teams either used (and adapted) their existing systems or developed a new methodology for ATE. The domain of heart failure was used as the test set, with three different tracks for English, French and Dutch. The teams were all ranked based on the f1-score they obtained on the test data, with additional evaluations of the types of terms they extracted and recall for different term frequencies and term lengths.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusions", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "The results show quite a large variation between all methodologies. The highest scores were obtained by a deep learning methodology using BERT as a binary classification model. The second best system does not rely on deep learning and combines pre-trained word embeddings with more classical features for ATE, such as statistical termhood measures. Such results show how there is still a lot of potential for deep learning techniques in the field of ATE, highlighting also the importance of large datasets like ACTER. However, it also illustrates that more traditional methodologies can still lead to state-of-the-art results as well, especially when updated with features like word em-beddings. The more detailed analyses also revealed how the composition of the output of the different systems varies, e.g., including or excluding more Named Entities, and focusing on either the most domain-specific and specialised terms (Specific Terms) or also on more general terms (Common Terms). This is a clear indication of how different applications for ATE may require different methodologies. For instance, translators may be more interested in a system that extracts mostly Specific Terms, since Common Terms may already be part of their general vocabulary. Checking recall for terms with different frequencies and terms with different lengths confirmed two often-cited weaknesses of ATE: low-frequency terms and long terms are more difficult to extract. However, in each case, there were some systems for which the performance was more stable and less impacted by these factors. The winning deep learning approach achieves a high recall even for hapax terms (64%) and one of the rule-based systems maintains a more or less stable recall for terms up to a length of five tokens. With these results, we conclude that there remains a lot of room for improvement in the field of ATE, both by trying the latest deep learning methodologies which have been successfully used in other natural language processing tasks, and by updating and combining more traditional methodologies with state-of-the-art features and algorithms. Taking into account the unpredictability of many machine learning approaches and the considerable variety between the potential outputs, as demonstrated in this shared task, it is essential for ATE to be evaluated beyond precision, recall, and f1-scores. To further encourage and facilitate both supervised machine learning approaches and high-quality evaluations on diverse data, the complete AC-TER dataset has been made freely available online (Rigouts Terryn, Ayla and Drouin, Patrick and Hoste, V\u00e9ronique and Lefever, Els, 2020).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusions", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "http://hdl.handle.net/1854/LU-8503113", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Local-Global Vectors to Improve Unigram Terminology Extraction", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Amjadian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Inkpen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Paribakht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Faez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 5th International Workshop on Computational Terminology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amjadian, E., Inkpen, D., Paribakht, T., and Faez, F. (2016). Local-Global Vectors to Improve Unigram Ter- minology Extraction. In Proceedings of the 5th Interna- tional Workshop on Computational Terminology, pages 2-11, Osaka, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Preference Learning in Terminology Extraction: A ROCbased approach", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Az\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Roche", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Kodratoff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Sebag", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceeedings of Applied Stochastic Models and Data Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "209--2019", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:cs/0512050" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Az\u00e9, J., Roche, M., Kodratoff, Y., and Sebag, M. (2005). Preference Learning in Terminology Extraction: A ROC- based approach. In Proceeedings of Applied Stochas- tic Models and Data Analysis, pages 209-2019, Brest, France. arXiv: cs/0512050.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Concept annotation in the CRAFT corpus", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Eckert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Evans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Garcia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Shipley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Sitnikov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Baumgartner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Verspoor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Blake", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hunter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "BMC Bioinformatics", |
|
"volume": "13", |
|
"issue": "", |
|
"pages": "161--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bada, M., Eckert, M., Evans, D., Garcia, K., Shipley, K., Sitnikov, D., Baumgartner, W. A., Cohen, K. B., Ver- spoor, K., Blake, J. A., and Hunter, L. E. (2012). Con- cept annotation in the CRAFT corpus. BMC Bioinfor- matics, 13:161-180.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Creating a test corpus for term extractors through term annotation", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Bernier-Colborne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Drouin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Terminology", |
|
"volume": "20", |
|
"issue": "1", |
|
"pages": "50--73", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernier-Colborne, G. and Drouin, P. (2014). Creating a test corpus for term extractors through term annotation. Terminology, 20(1):50-73.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Defining a Gold Standard for the Evaluation of Term Extractors", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Bernier-Colborne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 8th international conference on Language Resources and Evaluation (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernier-Colborne, G. (2012). Defining a Gold Standard for the Evaluation of Term Extractors. In Proceedings of the 8th international conference on Language Resources and Evaluation (LREC), Istanbul, Turkey. ELRA.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Annotation s\u00e9mantique et validation terminologique en texte int\u00e9gral en SHS", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Billami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Camacho-Collados", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Jacquey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Kister", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of TALN 2014", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "363--376", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Billami, M., Camacho-Collados, J., Jacquey, E., and Kister, L. (2014). Annotation s\u00e9mantique et validation termi- nologique en texte int\u00e9gral en SHS. In Proceedings of TALN 2014, pages 363-376, Marseille, France.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Topic Models Can Improve Domain Term Extraction", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Bolshakova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Loukachevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nokel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in Information Retrieval", |
|
"volume": "7814", |
|
"issue": "", |
|
"pages": "684--687", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bolshakova, E., Loukachevitch, N., and Nokel, M. (2013). Topic Models Can Improve Domain Term Extraction. In David Hutchison, et al., editors, Advances in Information Retrieval, volume 7814, pages 684-687. Springer Berlin Heidelberg, Berlin, Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Term Extraction Using Non-Technical Corpora as a Point of Leverage", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Drouin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Terminology", |
|
"volume": "9", |
|
"issue": "1", |
|
"pages": "99--115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Drouin, P. (2003). Term Extraction Using Non-Technical Corpora as a Point of Leverage. Terminology, 9(1):99- 115.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Les unit\u00e9s de signification sp\u00e9cialis\u00e9e\u015b elargissant l'objet du travail en terminologie", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Estop\u00e0", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Terminology", |
|
"volume": "7", |
|
"issue": "2", |
|
"pages": "217--237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Estop\u00e0, R. (2001). Les unit\u00e9s de signification sp\u00e9cialis\u00e9e\u015b elargissant l'objet du travail en terminologie. Terminol- ogy, 7(2):217-237.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Automatic recognition of domain-specific terms: an experimental evaluation", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Fedorenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Astrakhantsev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Turdakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Foo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Merkel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the LREC 2010 Workshop on Methods for automatic acquisition of Language Resources and their evaluation methods", |
|
"volume": "26", |
|
"issue": "", |
|
"pages": "49--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fedorenko, D., Astrakhantsev, N., and Turdakov, D. (2013). Automatic recognition of domain-specific terms: an experimental evaluation. In Proceedings of the Ninth Spring Researcher's Colloquium on Database and Infor- mation Systems, volume 26, pages 15-23, Kazan, Russia. Foo, J. and Merkel, M. (2010). Using machine learning to perform automatic term recognition. In Proceedings of the LREC 2010 Workshop on Methods for automatic acquisition of Language Resources and their evaluation methods, pages 49-54, Valetta, Malta. ELRA.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Feature-Less End-to-End Nested Term Extraction", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.05426" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gao, Y. and Yuan, Y. (2019). Feature-Less End-to-End Nested Term Extraction. arXiv:1908.05426 [cs, stat], August. arXiv: 1908.05426.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Fine-Grained Termhood Prediction for German Compound Terms Using Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "H\u00e4tty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Schulte Im Walde", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Joint Workshop on,Linguistic Annotation, Multiword Expressions and Constructions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "62--73", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H\u00e4tty, A. and Schulte im Walde, S. (2018a). Fine-Grained Termhood Prediction for German Compound Terms Us- ing Neural Networks. In Proceedings of the Joint Work- shop on,Linguistic Annotation, Multiword Expressions and Constructions (LAW-MWE-CxG-2018), pages 62- 73, Sante Fe, New Mexico, USA.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A Laypeople Study on Terminology Identification across Domains and Task Definitions", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "H\u00e4tty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Schulte Im Walde", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of NAACL-HLT 2018", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "321--326", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H\u00e4tty, A. and Schulte im Walde, S. (2018b). A Laypeople Study on Terminology Identification across Domains and Task Definitions. In Proceedings of NAACL-HLT 2018, pages 321-326, New Orleans, USA. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Termeval 2020: Taln-ls2n system for automatic term extraction", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hazem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bouhandi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Boudin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Daille", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of CompuTerm", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hazem, A., Bouhandi, M., Boudin, F., and Daille, B. (2020). Termeval 2020: Taln-ls2n system for automatic term extraction. In Proceedings of CompuTerm 2020.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Unsupervised training set generation for automatic acquisition of technical terminology in patents", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Judea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Br\u00fcgmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of COLING 2014, the 25th international conference on computational linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "290--300", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Judea, A., Sch\u00fctze, H., and Br\u00fcgmann, S. (2014). Unsu- pervised training set generation for automatic acquisi- tion of technical terminology in patents. In Proceedings of COLING 2014, the 25th international conference on computational linguistics: Technical Papers, pages 290- 300, Dublin, Ireland.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Terminology Extraction and Management", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kageura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Marshman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The Routledge Handbook of Translation and Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kageura, K. and Marshman, E. (2019). Terminology Ex- traction and Management. In O'Hagan, Minako, editor, The Routledge Handbook of Translation and Technology.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Methods of automatic term recognition", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kageura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Umino", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Terminology", |
|
"volume": "3", |
|
"issue": "2", |
|
"pages": "259--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kageura, K. and Umino, B. (1996). Methods of automatic term recognition. Terminology, 3(2):259-289.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Extraction of terminology in the field of construction", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Kessler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "B\u00e9chet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Berio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First International Conference on Digital Data Processing (DDP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "22--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kessler, R., B\u00e9chet, N., and Berio, G. (2019). Extraction of terminology in the field of construction. In Proceedings of the First International Conference on Digital Data Processing (DDP), pages 22-26, London, UK. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "GE-NIA corpus -a semantically annotated corpus for biotextmining", |
|
"authors": [ |
|
{ |
|
"first": "J.-D", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Tateisi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Bioinformatics", |
|
"volume": "19", |
|
"issue": "1", |
|
"pages": "180--182", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kim, J.-D., Ohta, T., Tateisi, Y., and Tsujii, J. (2003). GE- NIA corpus -a semantically annotated corpus for bio- textmining. Bioinformatics, 19(1):180-182.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Optimized Term Extraction Method Based on Computing Merged Partial C-Values", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Kosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Chaves-Fraga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Dobrovolskyi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Ermolayev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Information and Communication Technologies in Education, Research, and Industrial Applications. ICTERI 2019", |
|
"volume": "1175", |
|
"issue": "", |
|
"pages": "24--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kosa, V., Chaves-Fraga, D., Dobrovolskyi, H., and Ermo- layev, V. (2020). Optimized Term Extraction Method Based on Computing Merged Partial C-Values. In Infor- mation and Communication Technologies in Education, Research, and Industrial Applications. ICTERI 2019, volume 1175 of Communications in Computer and IN- formation Science, pages 24-49. Springer International Publishing, Cham.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Term Extraction via Neural Sequence Labeling a Comparative Evaluation of Strategies Using Recurrent Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kucza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Niehues", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Zenkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "St\u00fcker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2072--2076", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kucza, M., Niehues, J., Zenkel, T., Waibel, A., and St\u00fcker, S. (2018). Term Extraction via Neural Sequence Label- ing a Comparative Evaluation of Strategies Using Recur- rent Neural Networks. In Interspeech 2018, pages 2072- 2076, Hyderabad, India, September. ISCA.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Reference Lists for the Evaluation of Term Extraction Tools", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Loginova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gojun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Blancafort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Gu\u00e9gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Gornostay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "U", |
|
"middle": [], |
|
"last": "Heid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 10th International Congress on Terminology and Knowledge Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2401--2407", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Loginova, E., Gojun, A., Blancafort, H., Gu\u00e9gan, M., Gornostay, T., and Heid, U. (2012). Reference Lists for the Evaluation of Term Extraction Tools. In Proceedings of the 10th International Congress on Terminology and Knowledge Engineering, Madrid, Spain. ACL. Loukachevitch, N. (2012). Automatic Term Recognition Needs Multiple Evidence. In Proceedings of LREC 2012, pages 2401-2407, Istanbul, Turkey. ELRA.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "TExSIS: Bilingual Terminology Extraction from Parallel Corpora Using Chunk-based Alignment", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Macken", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Terminology", |
|
"volume": "19", |
|
"issue": "1", |
|
"pages": "1--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Macken, L., Lefever, E., and Hoste, V. (2013). TExSIS: Bilingual Terminology Extraction from Parallel Corpora Using Chunk-based Alignment. Terminology, 19(1):1- 30.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "The Termolator: Terminology Recognition Based on Chunking, Statistical and Search-Based Scores. Frontiers in Research Metrics and Analytics", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Meyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ortega", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Grieve-Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Babko-Malaya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meyers, A. L., He, Y., Glass, Z., Ortega, J., Liao, S., Grieve-Smith, A., Grishman, R., and Babko-Malaya, O. (2018). The Termolator: Terminology Recogni- tion Based on Chunking, Statistical and Search-Based Scores. Frontiers in Research Metrics and Analytics, 3.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "TBXTools: A Free, Fast and Flexible Tool for Automatic Terminology Extraction", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Oliver", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Vazquez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "473--479", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oliver, A. and Vazquez, M. (2015). TBXTools: A Free, Fast and Flexible Tool for Automatic Terminology Ex- traction. In Proceedings of Recent Advances in Natural Language Processing, pages 473-479, Hissar, Bulgaria.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Termeval 2020: Using tsr filtering method to improve automatic term extraction", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Oliver", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "V\u00e0zquez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of CompuTerm", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oliver, A. and V\u00e0zquez, M. (2020). Termeval 2020: Using tsr filtering method to improve automatic term extrac- tion. In Proceedings of CompuTerm 2020.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Termeval 2020: Racai's automatic term extraction system", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Pais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ion", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of Com-puTerm 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pais, V. and Ion, R. (2020). Termeval 2020: Racai's au- tomatic term extraction system. In Proceedings of Com- puTerm 2020.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Karst Exploration: Extracting Terms and Definitions from Karst Domain Corpus", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Pollak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Repar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Martinc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Podpe\u010dan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of eLex 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "934--956", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pollak, S., Repar, A., Martinc, M., and Podpe\u010dan, V. (2019). Karst Exploration: Extracting Terms and Def- initions from Karst Domain Corpus. In Proceedings of eLex 2019, pages 934-956, Sintra, Portugal.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Investigating Context Parameters in Technology Term Recognition", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Qasemizadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Handschuh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of SADAATL 2014", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qasemizadeh, B. and Handschuh, S. (2014). Investigating Context Parameters in Technology Term Recognition. In Proceedings of SADAATL 2014, pages 1-10, Dublin, Ire- land.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "The ACL RD-TEC 2.0: A Language Resource for Evaluating Term Extraction and Entity Recognition Methods", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Qasemizadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A.-K", |
|
"middle": [], |
|
"last": "Schumann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of LREC 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1862--1868", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qasemizadeh, B. and Schumann, A.-K. (2016). The ACL RD-TEC 2.0: A Language Resource for Evaluating Term Extraction and Entity Recognition Methods. In Proceed- ings of LREC 2016, pages 1862-1868, Portoro\u017e, Slove- nia. ELRA.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Multiword Expressions in the wild? The mwetoolkit comes in handy", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Coling 2010: Demonstration Volume", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "57--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Multiword Expressions in the wild? The mwetoolkit comes in handy. In Coling 2010: Demonstration Vol- ume, pages 57-60, Beijing, China.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Analysing the Impact of Supervised Machine Learning on Automatic Term Extraction: HAMLET vs TermoStat", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Rigouts Terryn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Drouin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of RANLP 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rigouts Terryn, A., Drouin, P., Hoste, V., and Lefever, E. (2019a). Analysing the Impact of Supervised Machine Learning on Automatic Term Extraction: HAMLET vs TermoStat. In Proceedings of RANLP 2019, Varna, Bul- garia.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "In No Uncertain Terms: A Dataset for Monolingual and Multilingual Automatic Term Extraction from Comparable Corpora", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Rigouts Terryn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rigouts Terryn, A., Hoste, V., and Lefever, E. (2019b). In No Uncertain Terms: A Dataset for Monolingual and Multilingual Automatic Term Extraction from Compara- ble Corpora. Language Resources and Evaluation, pages 1-34.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Compasses, Magnets, Water Microscopes", |
|
"authors": [ |
|
{ |
|
"first": "A.-K", |
|
"middle": [], |
|
"last": "Schumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Fischer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of LREC 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3578--3584", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Schumann, A.-K. and Fischer, S. (2016). Compasses, Magnets, Water Microscopes. In Proceedings of LREC 2016, pages 3578-3584, Portoro\u017e, Slovenia. ELRA.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Similarity Driven Unsupervised Learning for Materials Science Terminology Extraction", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Sarath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Shreedhar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Computaci\u00f3n y Sistemas", |
|
"volume": "23", |
|
"issue": "3", |
|
"pages": "1005--1013", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shah, S., Sarath, S., and Shreedhar, R. (2019). Simi- larity Driven Unsupervised Learning for Materials Sci- ence Terminology Extraction. Computaci\u00f3n y Sistemas, 23(3):1005-1013.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "BioNLP Shared Task 2011: Supporting Resources", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Topi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J.-D", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of BioNLP Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stenetorp, P., Topi\u0107, G., Pyysalo, S., Ohta, T., Kim, J.-D., and Tsujii, J. (2011). BioNLP Shared Task 2011: Sup- porting Resources. In Proceedings of BioNLP Shared Task 2011 Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Annotation s\u00e9mantique et terminologique avec la plateforme SMARTIES", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Termith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "TermITH, P. (2014). Annotation s\u00e9mantique et termi- nologique avec la plateforme SMARTIES.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Improving term extraction by combining different techniques", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vivaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Rodr\u00edguez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Terminology", |
|
"volume": "7", |
|
"issue": "1", |
|
"pages": "31--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vivaldi, J. and Rodr\u00edguez, H. (2001). Improving term ex- traction by combining different techniques. Terminol- ogy, 7(1):31-48, December.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Improving term candidates selection using terminological tokens", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "V\u00e0zquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Oliver", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Terminology", |
|
"volume": "24", |
|
"issue": "1", |
|
"pages": "122--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V\u00e0zquez, M. and Oliver, A. (2018). Improving term can- didates selection using terminological tokens. Terminol- ogy, 24(1):122-147, May.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Featureless Domain-Specic Term Extraction with Minimal Labelled Data", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of Australasian Language Technology Association Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "103--112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wang, R., Liu, W., and McDonald, C. (2016). Featureless Domain-Specic Term Extraction with Minimal Labelled Data. In Proceedings of Australasian Language Technol- ogy Association Workshop, pages 103-112, Melbourne, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Adapted TextRank for Term Extraction: A Generic Method of Improving Automatic Term Extraction Algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Petrak", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "D", |

"middle": [], |

"last": "Maynard", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "ACM Transactions on Knowledge Discovery from Data", |
|
"volume": "12", |
|
"issue": "5", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhang, Z., Petrak, J., and Maynard, D. (2018). Adapted TextRank for Term Extraction: A Generic Method of Improving Automatic Term Extraction Algorithms. ACM Transactions on Knowledge Discovery from Data, 12(5):1-7.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Evaluating Automatic Term Extraction Methods on Individual Documents", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Sajatovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Buljan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "\u0160najder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Ba\u0161i\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Joint Workshop on Multiword Expressions and WordNet (MWE-WN 2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--154", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sajatovi\u0107, A., Buljan, M.,\u0160najder, J., and Ba\u0161i\u0107, B. D. (2019). Evaluating Automatic Term Extraction Meth- ods on Individual Documents. In Proceedings of the Joint Workshop on Multiword Expressions and WordNet (MWE-WN 2019), pages 149-154, Florence, Italy. ACL. 7. Language Resource References", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Annotated Corpora for Term Extraction Research (ACTER)", |
|
"authors": [ |
|
{ |

"first": "Ayla", |

"middle": [], |

"last": "Rigouts Terryn", |

"suffix": "" |

}, |

{ |

"first": "Patrick", |

"middle": [], |

"last": "Drouin", |

"suffix": "" |

}, |

{ |

"first": "V\u00e9ronique", |

"middle": [], |

"last": "Hoste", |

"suffix": "" |

}, |

{ |

"first": "Els", |

"middle": [], |

"last": "Lefever", |

"suffix": "" |

} |
|
], |
|
"year": 2020, |
|
"venue": "Ghent University", |
|
"volume": "1", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rigouts Terryn, Ayla and Drouin, Patrick and Hoste, V\u00e9ronique and Lefever, Els. (2020). Annotated Corpora for Term Extraction Research (ACTER). Ghent Univer- sity, 1.2.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Recall for terms with various frequencies per team in English, including Named Entities Figure 3: Recall per term length (single-word terms (swts) to terms with over 5 tokens (5+wts) for each team in English, including Named Entities", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table><tr><td># Annotations</td></tr></table>", |
|
"text": "Number of documents and words in the entire corpus vs. the annotated part of each corpus in ACTER 1.2", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Number of annotations (counting all annotations separately or all unique annotations) of terms and Named Entities (NEs), per corpus in ACTER 1.2 a different meaning through different capitalisation options or POS patterns, they only count as a single annotation in this version. For example, the English corpus on dressage contains the term bent (verb -past tense of to bend), but also Bent (proper noun -person name). While both capitalisation and POS differ, and bent is not the lemmatised form, there is only one entry: bent (lowercased) in the gold standard (other full forms of the verb to bend have separate entries, if they are present and annotated in the corpus)", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |