|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:29:24.392409Z" |
|
}, |
|
"title": "Identifying Incorrect Labels in the CoNLL-2003 Corpus", |
|
"authors": [ |
|
{ |
|
"first": "Frederick", |
|
"middle": [], |
|
"last": "Reiss", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research -Almaden", |
|
"location": { |
|
"postCode": "95120", |
|
"settlement": "San Jose", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Cutler", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Muthuraman", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Eichenberger", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research -Almaden", |
|
"location": { |
|
"postCode": "95120", |
|
"settlement": "San Jose", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The CoNLL-2003 corpus for Englishlanguage named entity recognition (NER) is one of the most influential corpora for NER model research. A large number of publications, including many landmark works, have used this corpus as a source of ground truth for NER tasks. In this paper, we examine this corpus and identify over 1300 incorrect labels (out of 35089 in the corpus). In particular, the number of incorrect labels in the test fold is comparable to the number of errors that state-of-the-art models make when running inference over this corpus. We describe the process by which we identified these incorrect labels, using novel variants of techniques from semi-supervised learning. We also summarize the types of errors that we found, and we revisit several recent results in NER in light of the corrected data. Finally, we show experimentally that our corrections to the corpus have a positive impact on three state-ofthe-art models.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The CoNLL-2003 corpus for Englishlanguage named entity recognition (NER) is one of the most influential corpora for NER model research. A large number of publications, including many landmark works, have used this corpus as a source of ground truth for NER tasks. In this paper, we examine this corpus and identify over 1300 incorrect labels (out of 35089 in the corpus). In particular, the number of incorrect labels in the test fold is comparable to the number of errors that state-of-the-art models make when running inference over this corpus. We describe the process by which we identified these incorrect labels, using novel variants of techniques from semi-supervised learning. We also summarize the types of errors that we found, and we revisit several recent results in NER in light of the corrected data. Finally, we show experimentally that our corrections to the corpus have a positive impact on three state-ofthe-art models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The English-language portion of the CoNLL-2003 shared task (Tjong Kim Sang and De Meulder, 2003) (henceforth CoNLL-2003) is one of the most widely-used benchmarks for named entity recognition (NER) models. It consists of news articles from the Reuters RCV1 corpus (Lewis et al., 2004) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 96, |
|
"text": "(Tjong Kim Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 120, |
|
"text": "(henceforth CoNLL-2003)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 284, |
|
"text": "(Lewis et al., 2004)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Since its debut, CoNLL-2003 has played a central role in NLP research. Over 2300 research papers have cited the original CoNLL-2003 paper 1 . Among these works, many are landmark results that have revolutionized the field of natural language processing, including Glove embeddings (Pennington et al., 2014) , BERT embeddings (Devlin et al., 2019) , conditional random fields (Sutton and McCallum, 2012) , and bidirectional LSTM models (Lample et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 281, |
|
"end": 306, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 346, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 402, |
|
"text": "(Sutton and McCallum, 2012)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 456, |
|
"text": "(Lample et al., 2016)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The CoNLL-2003 corpus continues to be used in NER research. The Papers with Code website (Paper with Code, 2020), which tracks state-of-the-art F1 scores 2 for this corpus, currently (as of July 2020) shows 43 results from 2016 through 2019 that improved this metric.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "While researchers have relied heavily on the CoNLL-2003 corpus as a source of ground truth, few have paid attention to the corpus itself. Errors in the corpus could potentially mislead and even divert the course of future research. Recent work has pointed out that improper benchmarking can have significant impact on evaluating machine learning algorithms (Smith-Miles et al., 2014) . The fact that Stanislawek et al. (2019) and Wang et al. (2019) found many errors while examining parts of the corpus is even more alarming. A detailed examination of the corpus has become imperative.", |
|
"cite_spans": [ |
|
{ |
|
"start": 357, |
|
"end": 383, |
|
"text": "(Smith-Miles et al., 2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 425, |
|
"text": "Stanislawek et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 448, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present our work on correcting labeling errors in the CoNLL-2003 corpus. Section 2 gives an overview of the corpus itself, the high-level process we followed, and related work. Section 3 describes how we used a novel form of semi-supervised labeling to identify potentiallyincorrect labels. Sections 4 and 5 describe how we examined and categorized the flagged labels. And Sections 6 and 7 describe how we created a corrected version of the corpus and reevaluated past results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The CoNLL-2003 corpus contains news articles from a subset of the Reuters RCV1 corpus (Lewis et al., 2004) . Entities are tagged using an extended version of the tagging policy from the Message Understanding Conference (Tjong Kim Sang and De Meulder, 2003) (MUC) , with the addition of a new tag MISC to cover entities not mentioned in MUC's labeling rules. The data consists of text files in which each line holds information about one token. Associated with each token are tags in insideoutside-begin (IOB) format (Ramshaw and Marcus, 1995) . The files, eng.train, eng.testa, and eng.testb, contain the train , dev , and test folds of the corpus, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 106, |
|
"text": "(Lewis et al., 2004)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 256, |
|
"text": "(Tjong Kim Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 262, |
|
"text": "(MUC)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 542, |
|
"text": "(Ramshaw and Marcus, 1995)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this paper, we identify and correct labeling errors in the CoNLL-2003 corpus. We used a semisupervised approach to flag potentially-incorrect labels in the corpus, then manually reviewed the labels thus flagged.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Work", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Our approach builds on previous work in semisupervised labeling, with some key differences. Because we were looking for incorrect labels in a corpus that already had many high-quality labels, we needed a sieve with especially high sensitivity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Work", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We used ensembles of NER models trained on the corpus, and we focused on cases where the models agreed strongly on a particular label, but that label does not appear in the corpus. One of these ensembles was the outputs of the original 16 entries in the 2003 competition. We also trained two other 17-model ensembles ourselves by applying Gaussian random projections to the BERT embeddings space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Work", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We deliberately used models with F1 scores significantly below the state of the art. To find incorrect labels, we needed models that disagree with the original CoNLL-2003 corpus. Our initial experiments with the CoNLL-2003 competition entries showed that this ensemble, with F1 scores between 0.6 and 0.88, was particularly effective for finding incorrect labels. We tuned the models that we trained ourselves to have F1 scores in this range.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Work", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Our technique flagged 3182 out of a total of 35089 entity labels. Manual inspection determined that 850 of these labels -27% -were incorrect. We also found 470 additional incorrect labels in close proximity to the labels that our techniques flagged, for a total of 1320 incorrect labels across the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Work", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Of a particular note, our analysis found 421 incorrect labels in the test fold. The test fold for this corpus contains 5648 labels. An F1 score of 0.93, as current state-of-the-art models produce, corresponds to approximately 400 errors on this fold. The change in F1 score over the past 17 years (0.934 -0.888 = 0.046) corresponds to eliminating approximately 300 errors. The error rate of stateof-the-art models is comparable to the error rate of the corpus itself. We used the results of our hand labeling to build a corrected version of the corpus. Then we reevaluated the original entries in the competition, plus selected NER models from recent work, over the corrected corpus. Figure 1 shows how the reported accuracy of these models changed. Surprisingly, we did not observe any change in the relative ranking of the models. Even though we corrected almost 8% of the labels in the test fold, no model's F1 score changed by more than 0.01. Without retraining, the changes in F1 score were all in the downward direction, but the F1 scores of the more sophisticated models dropped by less. The highest F1 score dropped from 0.932 to 0.927, while the lowest dropped from 0.601 to 0.589. When we retrained the three stateof-the-art models on the corrected data, their F1 scores became higher than their original scores.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 684, |
|
"end": 692, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Our Work", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We have shared the full data set for this paper at", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reproducibility", |
|
"sec_num": "2.1.1" |
|
}, |
|
{ |
|
"text": "https://github.com/CODAIT/ Identifying-Incorrect-Labels-In-CoNLL-2003.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reproducibility", |
|
"sec_num": "2.1.1" |
|
}, |
|
{ |
|
"text": "This data set includes a complete list of the errors that we found in the corpus, with notes from the labelers about the nature of each error. We also include scripts for generating a corrected version of the full CoNLL-2003 corpus in its original format. We have also released the code for our experiments as part of our open source Text Extensions for Pandas project 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reproducibility", |
|
"sec_num": "2.1.1" |
|
}, |
|
{ |
|
"text": "Most of the previous work we have mentioned so far has treated the CoNLL-2003 corpus as ground truth. Two recent exceptions to this trend are Stanislawek et al. (2019) and Wang et al. (2019) . Stanislawek et al. (2019) identified some of the same incorrect labels that we found. This paper categorized the errors that modern NER models make on the test fold of the corpus. As a sideeffect of the error analysis, the authors of this paper flagged cases where the output of a model had been considered \"wrong\" because a label in the corpus was incorrect. The authors identified 99 such errors in the test fold of this corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 167, |
|
"text": "Stanislawek et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 190, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 218, |
|
"text": "Stanislawek et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "There are several important differences between this paper and our work. Stanislawek et al. (2019) flagged errors as a side-effect of another task, while our primary focus was on identifying as many errors as possible. Due to our broader focus, we identified 421 errors in the test fold, compared to the 99 errors they found. We also examined the other two folds of the corpus, while the previous paper focused only on the test fold. The previous paper used models with high precision and recall; and they examined all the incorrect outputs of these models. We deliberately used less accurate models so as to widen the scope of potential errors flagged, and we focused on cases where there was strong agreement between these models plus disagreement with the ground truth data. Wang et al. (2019) hired human labelers to label all sentences in the test fold of the corpus and found that 5.38% of sentences in this fold contained errors. This number is a lower error rate than we report, mostly due to the fact that the labelers did not look for errors in tokenization or sentence identification. Excluding those types of errors, our work flagged 348 out of 5648 entities in the test fold, for an error rate of 6.16%. We attribute the remaining 0.78 percent increase in error rate to the fact that our labelers examined entire documents and looked for consistency across doc-uments, while Wang et al. (2019) 's labelers only viewed individual sentences in isolation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 98, |
|
"text": "Stanislawek et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 778, |
|
"end": 796, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1388, |
|
"end": 1406, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Beyond the larger number of types of errors we searched for, there are two other important differences between our work and that of Wang et al. (2019) . We developed a novel semi-supervised approach to identifying incorrect labels, and we used this approach to examine the entire corpus instead of just the test fold.", |
|
"cite_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 150, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Our general approach of training an ensemble of models, then focusing attention on areas where most of the models disagree with the existing labels, has parallels to other work on human-in-the-loop methods for creating ground truth. Liang et al. (2017) used confidence estimates from a model trained on a data set to flag potential errors in the same data set for further review. The specific NLP task studied in that work was that of extracting a list of patient problems from an electronic medical record.", |
|
"cite_spans": [ |
|
{ |
|
"start": 233, |
|
"end": 252, |
|
"text": "Liang et al. (2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Fusing together the output of multiple models and/or rules is a also common approach when using weak supervision to train models over unlabeled NLP corpora. Lison et al. (2020) used hidden Markov models to generate labeled NER data from the outputs of multiple labeling functions. The Snorkel system (Ratner et al., 2020 ) provides a general framework for using the outputs of labeling functions to estimate both labels and the confidence of those labels.", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 176, |
|
"text": "Lison et al. (2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 320, |
|
"text": "(Ratner et al., 2020", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The data management and data mining communities have a long history of building systems and algorithms to identify errors in ground truth data. Abedjan et al. (2016) provide a through survey. Although the primary focus of this previous work was on structured data, subtasks like address normalization have an NLP component.", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 165, |
|
"text": "Abedjan et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We did not set out to relabel the CoNLL-2003 corpus. When we started looking at this corpus, our intent was to identify entity mentions that older models are not able to extract, but that state-of-theart models are able to extract. We had hoped to use this information to drive continued improvements to these models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automated Labeling", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The competition. These entrants used a variety of different models, drawing on the technology available at the time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Initial Results", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We believed that these model outputs would provide an objective picture of what kinds of entities were difficult to extract for state-of-the-art models circa 2003. We hypothesized that there would be entity mentions that none of the models could extract correctly, due to limitations of 2003-era technology. We further believed that modern models would be able to tag some of these previously impossible mentions. To test this hypothesis, we aggregated together the outputs of the original entrants to find these \"difficult\" entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Initial Results", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The corpus ships as a collection of tokens with tags in IOB format. Using Text Extensions for Pandas 5 , a library of extension types for pandas DataFrames (Reback et al., 2020; McKinney et al., 2010) , we translated the labeled tokens of the corpus into entity mentions -that is, spans of tokens within the corpus's document, plus the corresponding entity type tag for each span.", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 177, |
|
"text": "(Reback et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 200, |
|
"text": "McKinney et al., 2010)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Initial Results", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We performed the same translation on each of the entrants' outputs. This process produced seventeen sets of entity mentions: One for the original corpus and one for each of the sixteen entrants. Next, we merged these sets together to find the mentions that were present in the original corpus but were not present in the competition entries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Initial Results", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Then we looked at some of these entity mentions in the context of the original news articles, and our original hypothesis fell apart. About one third the examples we looked at turned out to be incorrect la-5 https://github.com/CODAIT/ text-extensions-for-pandas bels. It would be hard to argue that these \"incorrect\" answers were due to inadequacies of early-2000's technology, when it was in fact the corpus that was incorrect.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Initial Results", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Next, we took a slightly different view of the aggregate data we had. Instead of looking for entity mentions that were in the corpus but not in the entrants' outputs, we looked for entity mentions that were in all the entrants' outputs but were not in the corpus. As before, a third of the examples that we looked at involved incorrect or missing labels. We decided at this point to focus on identifying and correcting these incorrect labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Initial Results", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The model outputs from the original CoNLL-2003 competition had proven useful for zeroing in on incorrect labels, but this data had a significant shortcoming. The model outputs only cover the dev and test folds of the corpus. No model outputs on the train fold are available. To apply the technique we had used so far to the train fold, we would need to train our own collection of models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Custom Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We used a BERT embeddings layer from the transformers open source library (Wolf et al., 2019) , tuned on the CoNLL-2003 corpus, to produce BERT embeddings over sliding windows of text from the train fold. Then we applied 16 different Gaussian random projections to these 768-dimensional embeddings to reduce them to between 32 and 256 dimensions. We trained multinomial logistic regression classifiers over these random projections. We also trained an additional classifier over the full embeddings, for a total of 17 different models. Our goal in building these models was not to attain the highest possible precision and recall. In fact, high levels of accuracy could be detrimental to our task, as high levels of accuracy imply a high congruence with the ground-truth labels we were trying to correct. Instead, we wanted a collection of models that would produce diverse results and F1 scores in line with the accuracy of the original CoNLL-2003 entrants.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 93, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Custom Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "With one exception, the CoNLL-2003 competition produced F1 scores between 77% and 89% on the test fold. We tuned our models' training and inference until they produced results approximately within this range. Figure 2 shows the resulting F1 scores on the test fold. Figure 2a shows the original CoNLL-2003 competition entries' F1 scores, while Figure 2b shows the F1 scores of the models we trained, plotted against the dimensionality of their Gaussian random projection stages.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 209, |
|
"end": 217, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 266, |
|
"end": 275, |
|
"text": "Figure 2a", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 353, |
|
"text": "Figure 2b", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training Custom Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Based on published results on BERT embeddings for NER, we expect that additional tuning would have raised the F1 scores of our models by about 0.02. We judged that the lower F1 scores in Figure 2 are better for this application.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 195, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training Custom Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "As with our previous analysis of the original competition results, we aggregated together the outputs of these 17 models on the test fold of the corpus, then aligned these results with the corpus labels. A manual spot-check of these aligned results verified that these aggregated results also functioned as an effective sieve for identifying incorrect labels. Roughly half of the entity mentions that were found by all 17 models but were not in the corpus were due to incorrect or missing labels in the corpus. We found similar results on the dev fold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Custom Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Next, we applied our ensemble of models to the train fold of the corpus and compared the aggregated results against the corpus's labels. As with the test fold, we were able to use the aggregate model outputs to identify a list of entity mentions with a high fraction of incorrect corpus labels. However, this list was significantly shorter than the lists we were able to produce on the test and dev folds. Because the models were themselves trained on the train fold, there were fewer discrepancies between the model outputs and the corpus labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Validation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "To produce a larger list of potentially incorrect labels, we divided the entire corpus randomly into ten folds and performed a ten-fold cross-validation. For each of the ten folds, we retrained our ensemble of models on the other 9 folds and ran model inference on the current fold. This process involved training 170 different models, but because we only needed to generate the BERT embeddings once, we were able to perform all training in a few hours on a 4-year-old MacBook.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Validation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Interestingly, this cross-validation approach produced models with significantly higher F1 scores on the random holdout sets, compared with our earlier approach of training on the train fold and testing on the test fold. As Figure 2c shows, F1 scores for the holdout sets for each of the models -which together encompass the entire corpusranged from 0.89 to 0.94, an increase of roughly 5%.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 224, |
|
"end": 233, |
|
"text": "Figure 2c", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Cross-Validation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We attribute this improvement to the nonrandom split of the original corpus. The contest judges used article publication date to split the corpus into folds. The train and dev folds used articles from August of 1996, while the test fold was from December of that year (Tjong Kim Sang and De Meulder, 2003) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 305, |
|
"text": "(Tjong Kim Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Validation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "This non-random split matches common industry practices 6 . However, dividing the the corpus by time means that any systematic changes in the target domain over time are not visible to the optimizer during training. Models trained on a random sample of the corpus are able to achieve a higher F1 score because they have better information about the types of articles that were published in December 1996.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Validation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In both ensembles that we trained, our model outputs aligned well with the labels on the train fold. Consequently, our sieve identified fewer potentially incorrect labels in the train fold of the corpus, which in turn would lead to our identifying fewer incorrect labels during manual relabeling. Better accuracy led to worse results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Validation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Each of our three ensembles produced two lists of labels: one list of labels that were in the corpus but not in the model outputs; and a second list of labels that were in the model outputs but not in the corpus. Overall, we produced six lists of potentially-incorrect labels. Four of these lists spanned the entire corpus, while the remaining two (from the original contest entries) only spanned the test and dev folds.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We proceeded to examine these lists by hand, looking at each flagged label in the context of the target document. We focused on the labels where there was a strong agreement between the models in each ensemble. We started out by examining the labels where all models agreed, then moved onto the labels where all models but one agreed, and so on. As we progressed to labels with less agreement among models, the fraction of flagged labels that was actually incorrect decreased. When this fraction dropped below 20 percent, we stopped going through the ordered list of flagged labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For each list of potentially-incorrect labels, one member of our team examined the labels, and a second member of our team audited the decisions that the first member had made. In total, we made 12 passes (3 ensembles \u00d7 2 sets of labels \u00d7 2 human reviewers) of manual review over the train and test folds of the corpus and 8 passes over the test fold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "When we found that a label was incorrect, we coded the type of error and the required correction so that the error could be corrected automatically later on. We divided errors into several categories:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Tag: The corpus correctly identifies the span of an entity mention, but the span is associated with the wrong entity type.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Span: The corpus correctly identifies the type of an entity mention, but the boundaries of the span of tokens containing the mention are incorrect.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Both: The corpus correctly identifies an entity mention, but both the tag and the span boundaries are incorrect.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Wrong: The corpus incorrectly identifies an entity mention.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Sentence: The corpus contains an incorrect sentence boundary, and as a result the span and/or tag of one or more entity mentions are incorrect. This type of error especially problematic because incorrect labels on both sides of the sentence boundary count as two mistakes when computing precision and recall.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Token: The corpus contains an incorrect token boundary, and as a result the span and/or tag of one or more entity mentions are incorrect.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Appendix 9.1 shows examples of each error type. The data set that we have published as a companion to this paper (See Section 2.1.1) includes complete lists of the errors that we found, both before and after manual review.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hand Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Each manual pass over the corpus involved validating a set of suggested changes, not reannotating the corpus in its entirety. As a result, conventional metrics of inter-annotator agreement between our human evaluators do not apply. Instead, we report the similarity between the outputs of the three ensembles. Table 1 summarizes the Jaccard similarity between the three ensembles' outputs before and after manual review. Figure 4 shows a Venn diagram view of the relationship between the sets of flagged labels after manual review. The raw outputs of the two BERT-based ensembles showed a high degree of overlap, but this overlap reduced substantially after manual review. The original models flagged a very different set of labels from the BERT based models, especially after manual review.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 310, |
|
"end": 317, |
|
"text": "Table 1", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 429, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Inter-Annotator Agreement", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In total, we examined 3182 labels our ensembles had flagged in the three folds of the corpus. We considered any label where fewer than 7 models agreed with the corpus label to be \"flagged\". Of these labels, 1274 came from the test fold, 854 came from the dev fold, and 1054 came from the train fold; accounting for 22.6%, 14.3%, and 4.5% of their folds, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorrect Labels Identified", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "As we noted in Section 3.3, our models had significantly higher F1 scores on the train fold, both with and without cross-validation. Because model outputs were closer to the corpus labels, the ensembles flagged fewer labels on this fold. However, the fraction of these labels that were actually incorrect was higher than that on the other folds: 34% versus 23%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorrect Labels Identified", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Of the errors correctly flagged, 184 were found by the ensemble composed of the original entrants' results; 641 were flagged by our custom models; and 275 errors were found by custom models with cross-validation. 372 of these errors were correctly flagged by two or more approaches. While ex-amining the affected documents, we found 470 additional errors in the vicinity of flagged errors. Figure 3 shows the distribution of errors broken down by error type and source. The most frequent error type we found in the corpus was a Tag type error, accounting for 48% of errors in total. Both type errors were least frequent.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 390, |
|
"end": 398, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Incorrect Labels Identified", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our BERT-based models found a higher fraction of Sentence type errors, largely because these models were able to express spans that cross sentence boundaries. The entrants' outputs in our first ensemble, being constrained by the IOB file format, were physically incapable of expressing a span that crosses a sentence boundary. We also suspect that many of these older models operated on one sentence at a time, while the document context feeding our BERT embeddings could span multiple sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorrect Labels Identified", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Had we been aiming to maximize the F1 scores of our BERT-based models, we would have postprocessed the outputs of these models to split spans along sentence boundaries. This lack of postprocessing led to a decrease in F1 score, but it enabled us to find more errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorrect Labels Identified", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The distribution of error types remained relatively constant across folds, with one exception: Sentence errors accounted for a much larger fraction in the train fold -26% of errors, as opposed to the 8% and 10% rates in the dev and test folds, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorrect Labels Identified", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "After identifying incorrect tags, spans and sentence boundaries, we created a corrected version of the original CoNLL-2003 corpus, which we refer to as the corrected CoNLL-2003 corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corrected CoNLL-2003 Corpus", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We used the Text Extensions for Pandas library to parse the original corpus and extract tokens and spans for each entity. We created data files containing all of the vetted corrections from our hand labeling of ensemble outputs. We wrote a script that applies all of these corrections to the CoNLL-2003 corpus, producing a corrected version of the corpus. For information on how to obtain the code and data necessary to recreate our corrected corpus, as well as all the experiment code for this paper, see Section 2.1.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corrected CoNLL-2003 Corpus", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this section, we first re-evaluate the entries from the original competition against the corrected test fold of the corpus. We then re-evaluate the metrics of three state-of-the-art NER models from recent literature on the corrected corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Evaluation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We evaluated the original 16 CoNLL-2003 competition entries on the original and corrected CoNLL-2003 test folds. Before evaluating on the corrected data, we needed to adjust sentence boundaries and tokenization in the entrants' output files to match that of the corrected corpus. The evaluation metric for this corpus relies on perfect alignment between tokens and sentences of the files being compared. When we split a token, we copied the token's label to the new, smaller tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Re-evaluation of the Original Competition Entries", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "We recomputed precision, recall, and F1 scores. Our results are shown in Table 2 and Figure 1 . All of the entries have lower precision, recall, and F1 scores on the corrected CoNLL-2003 test fold than on the original test fold. Although we changed nearly 8% of the labels in the test fold, all the models' metrics decreased by 1% or less.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 80, |
|
"text": "Table 2", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 85, |
|
"end": 93, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Re-evaluation of the Original Competition Entries", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "The more accurate entries saw their F1 scores decline by less than the entries with lower F1 scores. For example, the top-scoring entry's F1 score dropped by 0.0054, while the bottom-scoring entry dropped by 0.0122 -more than twice as much. As a result, the ranking of entries did not change. It appears that the errors in the original corpus penalize models that produce answers closer to the actual ground truth.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Re-evaluation of the Original Competition Entries", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "Since we did not have access to the original models, we only performed inference and scoring on the corrected CoNLL-2003 corpus. We expect that the metrics would improve if the models are entirely re-trained on the corrected corpus' train fold. This would constitute relevant future work and point towards new reliable benchmarks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Re-evaluation of the Original Competition Entries", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "We evaluated three state-of-the-art NER models. We selected three models (Akbik et al., 2018 (Akbik et al., , 2019 Devlin et al., 2019) according to the ranking of models on the CoNLL-2003 NER task compiled on Papers with Code (Paper with Code, 2020) 7 . Table 3 summarizes our experimental results. We have the following observations. 7 We initially planned to select all of the models that rank top 10 from (Paper with Code, 2020). However, we were able to reproduce only three of them. We were unable to apply the rest of the models for the following technical reasons: two of which we requested code from the authors never received any responses; one of which we could find code but there is no instruction on how to use the code; three of which we could find code with instructions but we could not reproduce by following the instructions; one of which uses a nonstandard tagging scheme. We have contacted the authors of all of these papers for help with their code.", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 92, |
|
"text": "(Akbik et al., 2018", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 114, |
|
"text": "(Akbik et al., , 2019", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 115, |
|
"end": 135, |
|
"text": "Devlin et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 337, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 262, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results on Recent Models", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "Original Table 3 : Experimental results for recent models. We trained each of the three models (Akbik et al., 2018) , (Akbik et al., 2019) , and (Devlin et al., 2019) on the original and corrected train folds, respectively. For each trained model, we evaluated on the original and corrected test folds, respectively. For (Akbik et al., 2018) and (Akbik et al., 2019) , we trained on both train and dev folds. For (Devlin et al., 2019) , we trained on the train fold. For all models, we used the hyperparameter settings specified in their respective papers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 115, |
|
"text": "(Akbik et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 138, |
|
"text": "(Akbik et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 166, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 341, |
|
"text": "(Akbik et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 366, |
|
"text": "(Akbik et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 413, |
|
"end": 434, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 16, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "test Fold", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "On the corrected test fold, all the listed metrics (the F1 scores, precision, and recall) are higher for the models trained on the corrected corpus than those on the original corpus. This indicates that our correction on the corpus has a positive impact on the quality of training of the three models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Comparing the metrics of models trained and evaluated on the original corpus (the top-left section of the table) and the metrics of models trained and evaluated on the corrected corpus (the bottomright section of the table), we see that all metrics have been improved on the corrected corpus. This might indicate that these three models are actually more effective (according to the evaluation on the corrected corpus) than they were thought to be (according to the evaluation on the original corpus).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "However, on the original test fold, all the listed metrics (the F1 scores, precision, and recall) are not higher for the models trained on the original corpus than those on the corrected corpus. This might be explained by the fact that the errors in the original test fold are not consistent with the original train and dev folds, hence models trained on the original corpus are not necessarily more advantageous than those trained on the corrected corpus when evaluated on the original test fold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For models trained on the original corpus, all the listed metrics (the F1 scores, precision, and recall) on the corrected test fold are very close to those on the original test fold (differences are mostly within 0.002 and no larger than 0.01). Once again, this might be explicable by the fact that the errors the errors in the original test fold are not consistent with the original train and dev folds. Hence, models trained on the original corpus are not necessarily more advantageous when evaluated on the original test fold than on the corrected test fold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The CoNLL-2003 corpus is highly influential in named entity recognition (NER) research. It has been used for benchmarking many landmark NER models and has been continuing to play a critical role in recent research. In this paper, we took a closer look at the CoNLL-2003 corpus and identified a number of errors. We used a semi-supervised method to identify these errors and then systematically corrected them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "The primary contribution of this paper is the creation of a more error-free version of the CoNLL-2003 corpus, which can potentially be used to evaluate past NER models more accurately and make future benchmarking more reliable. Indeed, as our experiments on three recent state-of-the-art NER models have shown, our corrections to the corpus have a positive impact on these models: When evaluated on our corrected test fold, all three models trained on our corrected corpus outperformed their counterparts trained on the original corpus by a non-negligible margin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We firmly believe that benchmarking corpora are the lighthouses for research, and improving the quality of benchmarking corpora is of utmost importance in guiding the research community. We hope that others can replicate the process we applied to this corpus on other key corpora, and in doing so, improve the utility of these vital resources.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "We classified the errors that we found into several categories. In this section, we give concrete examples of each type.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Errors", |
|
"sec_num": "9.1" |
|
}, |
|
{ |
|
"text": "In some cases, the corpus had correctly identified the span of the entity mention, but the tokens of that span were labeled with an incorrect entity type. For example, the 156th document in the test fold contains the token/label sequence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tag Errors", |
|
"sec_num": "9.1.1" |
|
}, |
|
{ |
|
"text": "smuggled O heroin O from O Turkey I-LOC to O Antwerp I-ORG", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tag Errors", |
|
"sec_num": "9.1.1" |
|
}, |
|
{ |
|
"text": "This sequence incorrectly tags a mention of the city Antwerp as an ORG entity when it should be tagged LOC.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tag Errors", |
|
"sec_num": "9.1.1" |
|
}, |
|
{ |
|
"text": "We call errors of this type Tag errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tag Errors", |
|
"sec_num": "9.1.1" |
|
}, |
|
{ |
|
"text": "In other cases, the corpus correctly identified the entity type of an entity mention, but there was an error in labeling the precise range of tokens containing that entity. For example, the 113th document of the test fold contains the token/label sequence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Errors", |
|
"sec_num": "9.1.2" |
|
}, |
|
{ |
|
"text": "Ingeborg I-PER Helen I-PER Markein O", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Errors", |
|
"sec_num": "9.1.2" |
|
}, |
|
{ |
|
"text": "This sequence incorrectly marks the span 'Ingeborg Helen' as a 'PER' entity, when the correct span is 'Ingeborg Helen Markein', the full name of a Norwegian skier.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Errors", |
|
"sec_num": "9.1.2" |
|
}, |
|
{ |
|
"text": "We call errors of this type Span errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Span Errors", |
|
"sec_num": "9.1.2" |
|
}, |
|
{ |
|
"text": "At some locations in the corpus, an entity was subject to both a Span error and a Tag error at the same time. For example, the headline for the 23rd document of the test fold contains the token/label sequence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Both Errors", |
|
"sec_num": "9.1.3" |
|
}, |
|
{ |
|
"text": "These labels miss an instance of the ORG entity ARAB CONTRACTORS, a reference to The Arab Contractors Sporting Club, an Egyptian soccer team. In lieu of labeling ARAB CONTRACTORS, the sequence labels ARAB as a single-token MISC entity, which is not correct because that token is part of the longer ORG entity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ARAB I-MISC CONTRACTORS O WIN O AFRICAN I-MISC CUP I-MISC", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We call errors of this type Both errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ARAB I-MISC CONTRACTORS O WIN O AFRICAN I-MISC CUP I-MISC", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In some cases, the corpus marks tokens that do not match any entity type at all. For example, the 153rd document in the test fold contains the token/label sequence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Wrong Errors", |
|
"sec_num": "9.1.4" |
|
}, |
|
{ |
|
"text": "next O Wednesday I-ORG", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Wrong Errors", |
|
"sec_num": "9.1.4" |
|
}, |
|
{ |
|
"text": "This sequence of labels incorrectly marks Wednesday as an ORG entity when that token is in fact a reference to a day of the week.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Wrong Errors", |
|
"sec_num": "9.1.4" |
|
}, |
|
{ |
|
"text": "We call errors of this type Wrong errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Wrong Errors", |
|
"sec_num": "9.1.4" |
|
}, |
|
{ |
|
"text": "The creators of the corpus used automatic tools to break each document into sentences. Some of these sentence boundaries were incorrect, and some of these incorrect sentence boundaries occurred in the middle of an entity mention. For example, the 20th document of the dev fold contains the token/label sequence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Errors", |
|
"sec_num": "9.1.5" |
|
}, |
|
{ |
|
"text": "the O Berlin I-MISC Grand I-MISC Prix I-MISC (where the blank line encodes a sentence boundary).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Errors", |
|
"sec_num": "9.1.5" |
|
}, |
|
{ |
|
"text": "Because the labeling and scoring scheme for this corpus does not permit entity mentions to span sentence boundaries, this sequence marks Berlin and Grand Prix as two separate 'MISC' entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Errors", |
|
"sec_num": "9.1.5" |
|
}, |
|
{ |
|
"text": "This type of error is especially problematic because incorrect labels on these tokens will count as two mistakes when computing precision and recall. In addition, many models process one sentence at a time. When processing the above document, such models will see a sentence that ends with the token Berlin, followed by a sentence that starts with Grand Prix.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Errors", |
|
"sec_num": "9.1.5" |
|
}, |
|
{ |
|
"text": "In other cases, an incorrect sentence boundary led the human labeler to conclude incorrectly that the period after an abbreviation is not part of the abbreviation. For example, the 208th document of the train fold contains the token/label sequence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Errors", |
|
"sec_num": "9.1.5" |
|
}, |
|
{ |
|
"text": "The I-ORG Walt I-ORG Disney I-ORG Co I-ORG . O said O Thursday O (where the blank line encodes a sentence boundary).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Errors", |
|
"sec_num": "9.1.5" |
|
}, |
|
{ |
|
"text": "In this example, Co. should be labeled as an ORG entity, but only Co (without the period) is marked.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Errors", |
|
"sec_num": "9.1.5" |
|
}, |
|
{ |
|
"text": "We call errors of both these types Sentence errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Errors", |
|
"sec_num": "9.1.5" |
|
}, |
|
{ |
|
"text": "The authors of the original corpus used the MBT tagger (Daelemans et al., 2002) to tokenize the original news articles. Occasionally, the tokenizer made a mistake; and occasionally, a tokenization mistake happened to coincide with an entity mention. For example, the 169th document of the train fold contains the token/label sequence:", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 79, |
|
"text": "(Daelemans et al., 2002)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\"Token\"-Type Errors", |
|
"sec_num": "9.1.6" |
|
}, |
|
{ |
|
"text": "Nigerian I-MISC terms O jeopardize O Commonwealth I-ORG trip-Canada I-MISC . O Here, the tokenizer has incorrectly tokenized \"trip -Canada\" as a single token, and the human labeler has labeled this token as MISC, even though Canada is a LOC entity. Correcting this kind of problem involves splitting the incorrect token into its corrected parts, then relabeling those parts as needed. The above example turns into:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\"Token\"-Type Errors", |
|
"sec_num": "9.1.6" |
|
}, |
|
{ |
|
"text": "Nigerian I-MISC terms O jeopardize O Commonwealth I-ORG trip O -O Canada I-LOC . O", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\"Token\"-Type Errors", |
|
"sec_num": "9.1.6" |
|
}, |
|
{ |
|
"text": "We call errors of this type Token errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\"Token\"-Type Errors", |
|
"sec_num": "9.1.6" |
|
}, |
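|
{ |
|
"text": "Once the offending token and its replacement parts are known, the correction itself is mechanical. The sketch below is an illustration only; the replacement tokens and labels are supplied by hand, exactly as in the example above.\n\ndef split_token(tokens, tags, index, parts, part_tags):\n    '''Replace tokens[index] with parts, tagged with part_tags.'''\n    assert len(parts) == len(part_tags)\n    return (tokens[:index] + parts + tokens[index + 1:],\n            tags[:index] + part_tags + tags[index + 1:])\n\ntokens = ['Nigerian', 'terms', 'jeopardize', 'Commonwealth', 'trip-Canada', '.']\ntags = ['I-MISC', 'O', 'O', 'I-ORG', 'I-MISC', 'O']\ntokens, tags = split_token(tokens, tags, 4,\n                           ['trip', '-', 'Canada'], ['O', 'O', 'I-LOC'])\nprint(list(zip(tokens, tags)))\n# [..., ('trip', 'O'), ('-', 'O'), ('Canada', 'I-LOC'), ('.', 'O')]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\"Token\"-Type Errors", |
|
"sec_num": "9.1.6" |
|
}, |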
|
{ |
|
"text": "\"F1 score\" here means \"harmonic mean of precision and recall over the test fold for models trained on the train fold\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
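|
{ |
|
"text": "In symbols, F1 = 2PR / (P + R), where P and R denote precision and recall computed over entity spans. As a purely hypothetical example, P = 0.90 and R = 0.80 give an F1 score of approximately 0.847.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |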
|
{ |
|
"text": "https://github.com/CODAIT/ text-extensions-for-pandas", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.clips.uantwerpen.be/conll2003/ner/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our experience, most organizations that use machine learning do not have the ability to travel backwards in time. Hence, they train models on data from the past and apply those models to data from the future.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Detecting data errors: Where are we and what needs to be done?", |
|
"authors": [ |
|
{ |
|
"first": "Ziawasch", |
|
"middle": [], |
|
"last": "Abedjan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raul", |
|
"middle": [ |
|
"Castro" |
|
], |
|
"last": "Fernandez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ihab", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Ilyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mourad", |
|
"middle": [], |
|
"last": "Ouzzani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Papotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Stonebraker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the VLDB Endowment", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "993--1004", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.14778/2994509.2994518" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ziawasch Abedjan, Xu Chu, Dong Deng, Raul Castro Fernandez, Ihab F. Ilyas, Mourad Ouzzani, Paolo Papotti, Michael Stonebraker, and Nan Tang. 2016. Detecting data errors: Where are we and what needs to be done? Proceedings of the VLDB Endowment, 9(12):993-1004.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Pooled contextualized embeddings for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Akbik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanja", |
|
"middle": [], |
|
"last": "Bergmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roland", |
|
"middle": [], |
|
"last": "Vollgraf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "724--728", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Akbik, Tanja Bergmann, and Roland Vollgraf. 2019. Pooled contextualized embeddings for named entity recognition. In Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 724-728.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Contextual string embeddings for sequence labeling", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Akbik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duncan", |
|
"middle": [], |
|
"last": "Blythe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roland", |
|
"middle": [], |
|
"last": "Vollgraf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1638--1649", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Akbik, Duncan Blythe, and Roland Vollgraf. 2018. Contextual string embeddings for sequence labeling. In Proceedings of the International Con- ference on Computational Linguistics, pages 1638- 1649.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "MBT: Memory based tagger, version 1.0, reference guide. Technical report", |
|
"authors": [ |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Zavrel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "van den Bosch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ko", |
|
"middle": [], |
|
"last": "van der Sloot", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Walter Daelemans, Jakub Zavrel, A. van den Bosch, and Ko van der Sloot. 2002. MBT: Memory based tagger, version 1.0, reference guide. Technical re- port, University of Antwerp.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics, pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Neural architectures for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandeep", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuya", |
|
"middle": [], |
|
"last": "Kawakami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1030" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Miguel Ballesteros, Sandeep Sub- ramanian, Kazuya Kawakami, and Chris Dyer. 2016. Neural architectures for named entity recognition. In Proceedings of the Annual Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technolo- gies.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Rcv1: A new benchmark collection for text categorization research", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tony", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Rose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "361--397", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David D. Lewis, Yiming Yang, Tony G. Rose, and Fan Li. 2004. Rcv1: A new benchmark collection for text categorization research. Journal of Machine Learning Research, 5:361-397.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Ground truth creation for complex clinical nlp tasks -an iterative vetting approach and lessons learned", |
|
"authors": [ |
|
{ |
|
"first": "Jennifer", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ching-Huei", |
|
"middle": [], |
|
"last": "Tsou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Murthy", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Devarakonda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "AMIA Summits on Translational Science Proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "203--212", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jennifer J. Liang, Ching-Huei Tsou, and Murthy V. De- varakonda. 2017. Ground truth creation for complex clinical nlp tasks -an iterative vetting approach and lessons learned. AMIA Summits on Translational Science Proceedings, 2017:203 -212.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Named entity recognition without labelled data: A weak supervision approach", |
|
"authors": [ |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Lison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Barnes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aliaksandr", |
|
"middle": [], |
|
"last": "Hubin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samia", |
|
"middle": [], |
|
"last": "Touileb", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1518--1533", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pierre Lison, Jeremy Barnes, Aliaksandr Hubin, and Samia Touileb. 2020. Named entity recognition without labelled data: A weak supervision approach. In Proceedings of the Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1518- 1533.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Data structures for statistical computing in Python", |
|
"authors": [ |
|
{ |
|
"first": "Wes", |
|
"middle": [], |
|
"last": "Mckinney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 9th Python in Science Conference", |
|
"volume": "445", |
|
"issue": "", |
|
"pages": "51--56", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wes McKinney et al. 2010. Data structures for statisti- cal computing in Python. In Proceedings of the 9th Python in Science Conference, volume 445, pages 51-56.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Confer- ence on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Text chunking using transformation-based learning", |
|
"authors": [ |
|
{ |
|
"first": "Lance", |
|
"middle": [], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitch", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of the third Workshop on Very Large Corpora", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lance Ramshaw and Mitch Marcus. 1995. Text chunk- ing using transformation-based learning. In Pro- ceedings of the third Workshop on Very Large Cor- pora.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Snorkel: Rapid training data creation with weak supervision", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Ratner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Stephen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Henry", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"Alan" |
|
], |
|
"last": "Ehrenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sen", |
|
"middle": [], |
|
"last": "Fries", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "R\u00e9", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "The VLDB Journal", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "709--730", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s00778-019-00552-1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Ratner, Stephen H. Bach, Henry R. Ehren- berg, Jason Alan Fries, Sen Wu, and Christopher R\u00e9. 2020. Snorkel: Rapid training data creation with weak supervision. The VLDB Journal, 29:709-730.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Towards objective measures of algorithm performance across instance space", |
|
"authors": [ |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Smith-Miles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davaatseren", |
|
"middle": [], |
|
"last": "Baatar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "Wreford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rhyd", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computers & Operations Research", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "12--24", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.cor.2013.11.015" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kate Smith-Miles, Davaatseren Baatar, Brendan Wre- ford, and Rhyd Lewis. 2014. Towards objective measures of algorithm performance across instance space. Computers & Operations Research, 45:12- 24.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Named entity recognition -is there a glass ceiling", |
|
"authors": [ |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Stanislawek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Wr\u00f3blewska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alicja", |
|
"middle": [], |
|
"last": "W\u00f3jcicka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ziembicki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Przemyslaw", |
|
"middle": [], |
|
"last": "Biecek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "624--633", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K19-1058" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomasz Stanislawek, Anna Wr\u00f3blewska, Alicja W\u00f3jcicka, Daniel Ziembicki, and Przemyslaw Biecek. 2019. Named entity recognition -is there a glass ceiling? In Proceedings of the 23rd Confer- ence on Computational Natural Language Learning (CoNLL), pages 624-633, Hong Kong, China. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "An introduction to conditional random fields. Foundations and Trends in Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Sutton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "267--373", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1561/2200000013" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charles Sutton and Andrew McCallum. 2012. An intro- duction to conditional random fields. Foundations and Trends in Machine Learning, 4(4):267-373.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Tjong Kim Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--147", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1119176.1119195" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of the SIGNLL Conference on Computa- tional Natural Language Learning, pages 142-147, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Crossweigh: Training named entity tagger from imperfect annotations", |
|
"authors": [ |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lihao", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiacheng", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5153--5162", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1519" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zihan Wang, Jingbo Shang, Liyuan Liu, Lihao Lu, Ji- acheng Liu, and Jiawei Han. 2019. Crossweigh: Training named entity tagger from imperfect anno- tations. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing, pages 5153-5162. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "HuggingFace's transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Patrick Von Platen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Canwen", |
|
"middle": [], |
|
"last": "Plu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teven", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Scao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariama", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "Drame", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Lhoest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2019. HuggingFace's transformers: State-of-the-art natu- ral language processing. ArXiv, abs/1910.03771.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "F1 scores of the models in our three ensembles. Each scatter point in these plots represents a trained model. The x-axes ofFigures 2b and 2crepresent the number of dimensions of embeddings.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Distribution of error types found by each of the four methods.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Number of errors flagged by different combinations of ensembles after filtering by human labelers.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"text": "downloadable archive 4 for the corpus includes the outputs from the original entrants in the 2003", |
|
"content": "<table><tr><td/><td>0.95</td><td/><td>0.95</td><td/><td>0.95</td><td/></tr><tr><td/><td>0.90</td><td/><td>0.90</td><td/><td>0.90</td><td/></tr><tr><td>F1 Score</td><td>hammerton demeulder hendrickx whitelaw wu munro Model bender mccallum carrerasb mayfield curran carrerasa zhang klein chieu florian 0.60 0.65 0.70 0.75 0.80 0.85</td><td>F1 Score</td><td>0.60 0.65 0.70 0.75 0.80 0.85</td><td>32 64 128 256 768 Number of Dimensions F1 Score</td><td>0.60 0.65 0.70 0.75 0.80 0.85</td><td>Number of Dimensions 32 64 128 256 768</td></tr><tr><td colspan=\"2\">(a) Original CoNLL-2003 entries.</td><td/><td colspan=\"2\">(b) BERT-based models.</td><td colspan=\"2\">(c) BERT + cross-validation.</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"text": "Jaccard similarity between the flagged labels from different pairs of ensembles before and after human review. The original models flagged a substantially different set of labels from our BERT-based custom models, and this divergence increased after manual review.", |
|
"content": "<table><tr><td/><td/><td/><td colspan=\"2\">Original models</td></tr><tr><td/><td/><td/><td colspan=\"2\">Custom models</td></tr><tr><td>149</td><td>130</td><td>127</td><td colspan=\"2\">Custom models with cross-validation</td></tr><tr><td>22</td><td>88</td><td>19</td><td/></tr><tr><td/><td>90</td><td/><td>74</td><td>203</td><td>109</td></tr><tr><td colspan=\"3\">dev + test folds</td><td colspan=\"2\">train fold</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"text": "Experimental results on the original CoNLL 2003 (English) competition.", |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |