|
{ |
|
"paper_id": "K19-1048", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:06:31.331454Z" |
|
}, |
|
"title": "Learning A Unified Named Entity Tagger From Multiple Partially Annotated Corpora For Efficient Adaptation", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California",

"location": {}
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Outreach, Inc", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Boschee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California",

"location": {}
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California",

"location": {}
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Named entity recognition (NER) identifies typed entity mentions in raw text. While the task is well-established, there is no universally used tagset: often, datasets are annotated for use in downstream applications and accordingly only cover a small set of entity types relevant to a particular task. For instance, in the biomedical domain, one corpus might annotate genes, another chemicals, and another diseases-despite the texts in each corpus containing references to all three types of entities. In this paper, we propose a deep structured model to integrate these \"partially annotated\" datasets to jointly identify all entity types appearing in the training corpora. By leveraging multiple datasets, the model can learn robust input representations; by building a joint structured model, it avoids potential conflicts caused by combining several models' predictions at test time. Experiments show that the proposed model significantly outperforms strong multi-task learning baselines when training on multiple, partially annotated datasets and testing on datasets that contain tags from more than one of the training corpora. 1", |
|
"pdf_parse": { |
|
"paper_id": "K19-1048", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Named entity recognition (NER) identifies typed entity mentions in raw text. While the task is well-established, there is no universally used tagset: often, datasets are annotated for use in downstream applications and accordingly only cover a small set of entity types relevant to a particular task. For instance, in the biomedical domain, one corpus might annotate genes, another chemicals, and another diseases-despite the texts in each corpus containing references to all three types of entities. In this paper, we propose a deep structured model to integrate these \"partially annotated\" datasets to jointly identify all entity types appearing in the training corpora. By leveraging multiple datasets, the model can learn robust input representations; by building a joint structured model, it avoids potential conflicts caused by combining several models' predictions at test time. Experiments show that the proposed model significantly outperforms strong multi-task learning baselines when training on multiple, partially annotated datasets and testing on datasets that contain tags from more than one of the training corpora. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Named Entity Recognition (NER), which identifies the boundaries and types of entity mentions from raw text, is a fundamental problem in natural language processing (NLP). It is a basic component for many downstream tasks, such as relation extraction (Hasegawa et al., 2004; Mooney and Bunescu, 2005) , coreference resolution (Soon et al., 2001) , and knowledge base construction (Craven et al., 1998; Craven and Kumlien, 1999) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 273, |
|
"text": "(Hasegawa et al., 2004;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 299, |
|
"text": "Mooney and Bunescu, 2005)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 344, |
|
"text": "(Soon et al., 2001)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 400, |
|
"text": "(Craven et al., 1998;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 426, |
|
"text": "Craven and Kumlien, 1999)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One problem in NER is the diversity of entity types, which vary in scope for different domains and downstream tasks. Traditional NER for the news domain focuses on three coarsegrained entity types: person, location, and organization (Tjong Kim Sang and De Meulder, 2003) . However, as NLP technologies have been applied to a broader set of domains, many other entity types have been targeted. For instance, Ritter et al. (2011) add seven new entity types (e.g., product, tv-show) on top of the previous three when annotating tweets. Other efforts also define different but partially overlapping sets of entity types (Walker et al., 2006; Ji et al., 2010; Consortium, 2013; Aguilar et al., 2014) . These non-unified annotation schemas result in partially annotated datasets: each dataset is only annotated with a subset of possible entity types.", |
|
"cite_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 270, |
|
"text": "Kim Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 407, |
|
"end": 427, |
|
"text": "Ritter et al. (2011)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 616, |
|
"end": 637, |
|
"text": "(Walker et al., 2006;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 638, |
|
"end": 654, |
|
"text": "Ji et al., 2010;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 655, |
|
"end": 672, |
|
"text": "Consortium, 2013;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 673, |
|
"end": 694, |
|
"text": "Aguilar et al., 2014)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One approach to this problem is to train individual NE taggers for each partially annotated dataset and combine their results using some heuristics. Figure 1 shows an example that demonstrates the possible shortcomings of this approach, using the biomedical domain as a case study. 2 Here, we train four separate models on four partially annotated datasets: AnatEM (Pyysalo and Ananiadou, 2013) annotated for the anatomy type, BC2GM (Smith et al., 2008) for the gene type, JNLPBA (Kim et al., 2004) for cell types, and Linnaeus (Gerner et al., 2010) for the species type. We can see that the models' predictions contradict each other when applied to the same test sentence-making it a challenge to accurately combine them.", |
|
"cite_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 283, |
|
"text": "2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 394, |
|
"text": "(Pyysalo and Ananiadou, 2013)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 453, |
|
"text": "(Smith et al., 2008)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 498, |
|
"text": "(Kim et al., 2004)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 549, |
|
"text": "(Gerner et al., 2010)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 157, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a deep structured model to leverage multiple partially annotated datasets, allowing us to jointly identify the union Figure 1 : An example sentence from the CellFinder corpus (Neves et al., 2012) showing the challenges in combining the output of individual NE taggers. The Gold row is the human annotations in CellFinder. The rows below are predictions made by models trained on datasets that each contain only a subset of the CellFinder types. Note that the individual taggers' predictions can conflict with each other, making it challenging to combine them. (Note: we renamed CellFinder's Cell Component to Cell Type to fit it in the space above.) of all entity types presented in the training data. The model leverages supervision signals across diverse datasets to learn robust input representations, thus improving the performance for each entity type. Moreover, it makes joint predictions to avoid potential conflicts among models built on different entity types, allowing further improvement for cross-type NER.", |
|
"cite_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 221, |
|
"text": "(Neves et al., 2012)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 151, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Experiments on both real-world and synthetic datasets show that our model can efficiently adapt to new corpora that have more types than any individual dataset used for training and that it achieves significantly better results compared to strong multi-task learning baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We formally define the problem by first defining our terminology.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Global Tag Space. Let C i denote a corpus, and T C i denote the set of entity types that are tagged in corpus C i . When there are a set of corpora C = {C 1 , C 2 , ..., C n }, each has its own tag space concerning different entity types, the global tag space is defined as the union of the local tag space. Formally,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "T C = T C 1 \u222a T C 2 \u222a ... \u222a T Cn . Partially Annotated Corpus. If T C i T C , then C i is a partially annotated corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Global Evaluation. When a model is trained on a set of partially annotated corpora C and predicts tags for the whole global tag space T C , we say it is making global predictions. Accordingly, the evaluation of the models' performance on T C is called global evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our goal is to train a single unified NE tagger from several partially annotated corpora for efficient adaptation to new corpora that have more types than any individual dataset used during training. Formally, we have a set of corpora C = {C 1 , C 2 , ..., C n }, and we propose to train a joint model on C such that it makes predictions for the global tag space T C . One benefit of this joint model is that it can be easily adapted to a new tag space T Cu where T Cu \u2286 T C , and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "T Cu T C i , \u2200C i \u2208 C.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section, we first introduce neural architectures for NER which our work builds upon and then summarize previous work on imperfect annotation problems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Work", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "With recent advances using deep neural networks, bi-directional long short-term memory networks with conditional random fields (BiLSTM-CRF) have become standard for NER (Lample et al., 2016) . A typical architecture consists of a BiL-STM layer to learn feature representations from the input and a CRF layer to model the interdependencies between adjacent labels and perform joint inference. Ma and Hovy (2016) introduce additional character-level convolutional neural networks (CNNs) to capture subword unit information. In this paper, we use a BiLSTM-CRF with character-level modeling as our base model. We now briefly review the BiLSTM-CRF model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 190, |
|
"text": "(Lample et al., 2016)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 410, |
|
"text": "Ma and Hovy (2016)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "BiLSTMs. Long Short Term Memory networks (LSTMs) (Hochreiter and Schmidhuber, 1997) are a variation of RNNs that are designed to avoid the vanishing/exploding gradient problem (Bengio et al., 1994) . Specifically, BiLSTMs take as input a sequence of words x = {x k |k \u2208 N } and output a sequence of hidden vectors: H = {h k |k \u2208 N } BiLSTMs combine a left-to-right (forward) and a right-to-left (backward) LSTM to capture both left and right context. Formally, they produce a hidden", |
|
"cite_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 83, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 197, |
|
"text": "(Bengio et al., 1994)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "vectorh i = [ \u2212 \u2192 h i ; \u2190 \u2212 h i ] for each input, where \u2212 \u2192 h i and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2190 \u2212 h i are produced by the forward and the backward LSTMs respectively; [; ] denotes vector concatenation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Character-level Modeling. Following Wang et al. (2018) , we use a BiLSTM for character-level modeling. We concatenate the hidden vector of the space after a word from the forward LSTM and the hidden vector of the space before a word from the backward LSTM to form a character-level representation of the word: Neural-CRFs. Conditional Random Fields (CRFs) (Lafferty et al., 2001) are sequence tagging models that capture the inter-dependencies between the output tags; they have been widely used for NER (McCallum and Li, 2003; Peng and Dredze, 2015 . Given a set of training data {x i , y i } N , a CRF minimizes negative log-likelihood:", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 54, |
|
"text": "Wang et al. (2018)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 379, |
|
"text": "(Lafferty et al., 2001)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 527, |
|
"text": "(McCallum and Li, 2003;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 549, |
|
"text": "Peng and Dredze, 2015", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "h c i = [ \u2212 \u2192 h c i ; \u2190 \u2212 h c i ].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "min \u0398 \u2212 i log P (y i | x i ; \u0398),", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (y i | x i ; \u0398) = Gold Energy P artition = St(y i ) y St(y )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where y is any possible tag sequence with the same length as y i , St(y ) is the potential of the tag sequence y , and St(y i ) is the potential of the gold tag sequence. The numerator St(y i ) is called the gold energy function, and the denominator y St(y ) is the partition function. The likelihood function using globally annotated data is illustrated in Figure 2a . The potential of a tag sequence can be computed as:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 367, |
|
"text": "Figure 2a", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "St(y) = |y| t=1 Score(y[t], y[t \u2212 1])", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where y[t] is the tth element in y (y[\u22121] is the start of the sequence), and ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Score(y[t], y[t \u2212 1]) = exp (tr(y[t], y[t \u2212 1])) * exp (em(y[t]))", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Neural Architectures for NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Learning from multiple partially annotated datasets could be more generally thought of as learning from imperfect annotations. In that broad sense, there are several notable areas of prior work. One of the most prominent concerns learning from incomplete annotations (noisy labels), where some occurrences of entities are neglected in the annotation process and falsely labeled as non-entities (negative). A related problem is learning from unlabeled data with distant supervision. A major challenge of all these settings, including ours, is that a positive instance might be labeled as negative. A well-explored solution to this problem is proposed by Tsuboi et al. (2008) , which instead of maximizing the likelihood of the gold tag sequence, we maximize the total likelihood for all possible tag sequences consistent with the gold labels. Tsuboi et al. (2008) ; Yang and Vozila (2014) applied this idea to the incomplete annotation setting; Shang et al. 2018; Liu et al. (2014) applied it to the unlabeled data with distant supervision setting; and Greenberg et al. (2018) applied it to the partial annotation setting. While this is a general solution, its primary drawback is that it assumes a uniform prior on all labels consistent with the gold labels. This may have the result of overly encouraging the prediction of entities, resulting in low precision.", |
|
"cite_spans": [ |
|
{ |
|
"start": 653, |
|
"end": 673, |
|
"text": "Tsuboi et al. (2008)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 842, |
|
"end": 862, |
|
"text": "Tsuboi et al. (2008)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 865, |
|
"end": 887, |
|
"text": "Yang and Vozila (2014)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 963, |
|
"end": 980, |
|
"text": "Liu et al. (2014)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1052, |
|
"end": 1075, |
|
"text": "Greenberg et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning from Imperfect Annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To tackle the problem of incomplete annotations, Carlson et al. (2009) ; Yang et al. (2018) explored bootstrap-based semi-supervised learning on unlabeled data, iteratively identifying new entities with the taggers and then re-training the taggers. Bellare and McCallum (2007) ; Li and Liu (2005) ; Fernandes and Brefeld (2011) explored an EM algorithm with semi-supervision.", |
|
"cite_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 70, |
|
"text": "Carlson et al. (2009)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 276, |
|
"text": "Bellare and McCallum (2007)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 296, |
|
"text": "Li and Liu (2005)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning from Imperfect Annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "For the partial annotation problem, most previous work has focused on building individual taggers for each dataset and using single-task learning or multi-task learning (Crichton et al., 2017; Wang et al., 2018 ). In singletask learning, each model is trained separately on each dataset C i , and makes local predictions on T C i . Based on the neural-CRF architecture, multitask learning uses a different CRF layer for each dataset C i (each task) to make local predictions on T C i , and shares the lower-level representation learning component across all tasks. Both singletask learning and multi-task learning make local predictions and have to apply heuristics to combine the model predictions, resulting in the collision problem demonstrated in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 192, |
|
"text": "(Crichton et al., 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 210, |
|
"text": "Wang et al., 2018", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 751, |
|
"end": 759, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning from Imperfect Annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To the best of our knowledge, Greenberg et al. (2018) is the only prior work trying to build a unified model from multiple partially annotated corpora. We will show that their model, which is reminiscent of Tsuboi et al. (2008) , is a special case of ours and that our other variations achieve better performance. In addition, they only evaluated the model on the training corpora while we conduct evaluations to test the model's ability to adapt to new corpora with different tag spaces.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 53, |
|
"text": "Greenberg et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 207, |
|
"end": 227, |
|
"text": "Tsuboi et al. (2008)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning from Imperfect Annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "As mentioned above, we use a BiLSTM-CRF with character-level modeling as our base model. Our goal is to build a unified model to make global predictions. That is, our model will be jointly trained on multiple partially annotated datasets C and make predictions on the global tag space T C . Such a unified model will enjoy the benefit of learning robust representations from multiple datasets just like multi-task learning while maintaining a joint probability distribution of the global tag space to avoid possible conflicts from individual models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "A simple solution to the problem is to merge all the datasets into one giant corpus. A single model can then be trained on this corpus to make global predictions. However, such a corpus will be missing many correct annotations, since each portion will be annotated with only a subset of the target entity types. Figure 2b shows an example: here, a location (Texas) exists but is labeled as a non-entity, because the original dataset from which this sentence is drawn does not annotate locations at all. As a result, this approach suffers from false penalties when applying the original likelihood function (Eq. 2-4) to train the model, meaning that it penalizes predictions that correctly identify entities with types that are not annotated for a particular sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 312, |
|
"end": 321, |
|
"text": "Figure 2b", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Naive Approach", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "One way to improve performance is to explicitly acknowledge the incompleteness of the existing \"gold\" annotations and to give the model credit for predicting any tag sequence that is consistent with the partial annotations. This can be done by modifying the CRF's gold energy function, illustrated in the upper part of Figure 2c . Specifically, in this example, John is labeled as PER, so PER is the only possible correct tag at that position. However, lives, in, and Texas are labeled as O (nonentity), which here means only that they may not be PER-but any of them could be LOC, since locations are not annotated for this sentence. Therefore, any sequence that assigns either O or LOC for any of these three positions is consistent with the gold labels. To account for this, we modify the gold energy function to credit all tag sequences that are consistent with the gold annotations, encouraging the model to predict other consistent labels when the gold label is O. Tsuboi et al. (2008) propose a specific solution that applies this idea on incomplete annotations: instead of maximizing the likelihood of the gold tag sequence when optimizing the CRF model, they maximize the total likelihood of all possible tag sequences consistent with the gold labels. This approach is later used by Greenberg et al. (2018) to handle the problem of partial annotation. We will address a potential problem with their method and propose a generalized version in Section 4.4.", |
|
"cite_spans": [ |
|
{ |
|
"start": 970, |
|
"end": 990, |
|
"text": "Tsuboi et al. (2008)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 1291, |
|
"end": 1314, |
|
"text": "Greenberg et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 319, |
|
"end": 328, |
|
"text": "Figure 2c", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Improving the Gold Energy Function", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Modifying the gold energy function will give credit to a system for producing alternative entity labels for words tagged as O in the partially annotated training. A different solution is to simply not penalize predictions of such alternative labels. This can be done by modifying the partition function and keeping the gold energy function unchanged. The lower part of Figure 2c gives an illustration. As stated above, LOC is a consistent alternative entity label for lives, in, and Texas. We therefore exclude from our calculations any paths that include LOC at any of those positions. More generally, we exclude all such consistent but alternative tag sequences from the computation of the CRF's partition function. Section 4.4 gives formal definitions with equations. The improved partition function sets the model free to predict alternative labels without penalty (as long as they are consistent with the known gold annotations), but it does not give them any positive credit for doing so (as in the previous approach). We hypothesize that the improved partition function would work better than the improved gold energy function in our setting because it addresses the false penalties problem more precisely. We will verify this hypothesis in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 369, |
|
"end": 378, |
|
"text": "Figure 2c", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Improving the Partition Function", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "There is a potential problem with naively applying the improved gold energy function: when the gold label is O, the model is encouraged to predict other consistent labels as strongly as it is encouraged to predict O. However, many O labels are confident annotations of O. As a result, naively training with the improved gold energy function tends to over-predict entities and not predict Os. To mitigate this problem, we discount the energy of tag sequences that go through alternative labels. This can be achieved by introducing a hyper-parameter M (mask) \u2208 [0, 1] as a discounting factor for the gold energy function. Formally, we modify Eq 3 to:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discounting Alternative Sequences", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "St (y, M ) = |y| t=1 (Score(y[t], y[t \u2212 1]) * mask(y[t], M )),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discounting Alternative Sequences", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discounting Alternative Sequences", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "mask(y[t], M ) = M, if y[t] \u2208 alternative 1, Otherwise .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discounting Alternative Sequences", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "where alternative is the set of alternative labels. We thus have the improved gold energy function:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discounting Alternative Sequences", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Improved Gold Energy = y\u2208valid St (y, M ),", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Discounting Alternative Sequences", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "where valid is the set of all tag sequences that are consistent with the gold sequence, including the gold sequence itself.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discounting Alternative Sequences", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Similarly, for the improved partition function, we can use the same strategy to discount the energy of alternative sequences rather than completely removing them. We thus introduce another M \u2208 [0, 1] and the improved partition function becomes:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discounting Alternative Sequences", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Improved P artition = y St (y , M ), (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discounting Alternative Sequences", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "For generality, we combine the improved gold energy and the improved partition function to make a new likelihood function as our final model:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Improved Functions", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Improved LH = y\u2208valid St (y, M ) y St (y , M )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Combining Improved Functions", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "To ensure Equation 7 is a valid likelihood function (the probabilities of all sequences sum to 1), we need a constraint that M = M . Note that Equation 7 subsumes all models discussed in this section. Specifically, when M = 0, M = 1, the model is the Naive Model discussed in Section 4.1; when M = 1, M = 1, the model is the same as Greenberg et al. (2018) discussed in Section 4.2; when M = 0, M = 0, the model is the same as proposed in Section 4.3. We have a general perspective of all the models by simply treating M and M as hyper-parameters. Note that for the Naive Model, since M ! = M , the Equation 7 is not always a valid likelihood function 3 . This may partially explain why the Naive Model performs so poorly under this setting. We posit that the model will work the best when M = M .", |
|
"cite_spans": [ |
|
{ |
|
"start": 333, |
|
"end": 356, |
|
"text": "Greenberg et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combining Improved Functions", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Our goal is to train a unified NER model on multiple partially annotated datasets. This model will make global predictions and can efficiently adapt to new corpora that contain tags from more than one training corpus. To fully test this capability, we would need a single test set annotated with all types of interest. However, the motivation behind this effort is that such a dataset typically does not exist. We therefore take two approaches to approximate such an evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In the first evaluation setting, we take advantage of the fact that although there may not be a single dataset annotated with all types of named entities of interest, there exist several datasets that cover types from more than one of the training corpora. Specifically, we are able to select test corpora that each cover types of interest from multiple training corpora. Table 1 shows the biomedical corpora we use and their entity types. For example, we use BC5CDR for global evaluation, because its entity types (Chemical and Disease) cover multiple training corpora (BC4CHEM for Chemical and NCBI for Disease).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 372, |
|
"end": 379, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In the second evaluation setting, we create synthetic datasets from the CoNLL 2003 NER dataset to simulate training and global evaluations. Specifically, the CoNLL 2003 dataset is annotated with four entity types: location, person, organization, and miscellaneous entities. We randomly split the training set into four portions, each containing only one entity type (all other types are removed). In this setting, the four portions of the training set are used for training and the origi-3 This may be confusing because when M = 0, M = 1 it looks exactly the same as the original CRF likelihood function. But in the partial annotation setting, this means that the scores of alternative sequences will be zero in the numerator but non-zero in the denominator, which makes the total likelihood less than 1. It suggests that the original CRF likelihood function is not suitable for the partial annotation setting. Table 1 : Details of the biomedical corpora. \"others\" denotes NE types that do not appear in the training corpora, and thus are not evaluated. nal dataset with all entities annotated is used as a global corpus. More details about all the datasets can be found in Appendix A.1.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 911, |
|
"end": 918, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The motivation for this work rests on the assumption that even when a dataset is annotated for a certain set of entity types, it likely contains other types of entities that are unlabeled. To verify this assumption, we expand the annotations of each dataset using heuristics and compute the pairwise mention-level overlap between the datasets. Specifically, suppose we are comparing two datasets, A and B. We first construct A' and B', where A' contains all mentions in A but is augmented with new mentions found by taking all strings annotated in B and marking them as named entities in A (regardless of context; there may obviously be some errors). We do the same (in the opposite direction) to construct B'. We then compute the pairwise overlap coefficient between A' and B' according to the following criterion:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Biomedical Dataset Analysis", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "overlap(A , B ) = |A \u2229 B | min(|A |, |B |)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Biomedical Dataset Analysis", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": ". Figure 3 shows the heat maps. For the training group, BC2GM, BC4CHEMD, and Linnaeus are considerably overlapped, although they are annotated with different entity types (GP, Chemical, and Species). This confirms our assumption that although the datasets are annotated for a subset of entity types, they contain other types that are unlabeled. 4", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 10, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Biomedical Dataset Analysis", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "We borrow most of the best hyper-parameters reported by Wang et al. (2018) . The hidden sizes of the BiLSTMs are tuned, and the best value we found is 100 for the character-level BiLSTM, and 300 for the word-level BiLSTM. We also ", |
|
"cite_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 74, |
|
"text": "Wang et al. (2018)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hyper-parameters.", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We compare different variations of our unified model and other models in different settings. We first train models on all training corpora, and then perform evaluations under two scenarios: (1) no-supervision: directly evaluating the trained models on each global corpora;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Models.", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "(2) limitedsupervision: fine-tuning the models on a small subset of the training portion of each global corpus before the evaluations. Under both scenarios, we report performance of four different models:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Models.", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "\u2022 MTM/MTM-vote: Train a multi-task model (MTM) on training corpora, using a separate CRF for each corpus. (This is the current state-of-the-art structure (Wang et al., 2018) when evaluated on the training corpora.)", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 173, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Models.", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "-Under the no-supervision setting, we heuristically combine all existing CRF's predictions to make global predictions. Specifically, we apply two heuristics to resolve conflicts while preserving entity chunk-level consistency. First, where predictions from more than one model overlap, we expand each prediction's boundary to the outermost position. Second, we always favor the predictions of named entities over the predictions of non-entity. 5 -Under the limited-supervision setting, for each global corpus, we add a new CRF and train it along with the LSTMs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Models.", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "\u2022 Unified-01: Use the naive training approach described in 4.1; this corresponds to our unified model with settings M = 0, M = 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Models.", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "\u2022 Unified-11: Use the improved gold energy function described in 4.2; this corresponds to our unified model with settings M = 1, M = 1 and is equivalent to the model proposed by Greenberg et al. (2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 201, |
|
"text": "Greenberg et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Models.", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "\u2022 Unified-00: Use the improved partition function proposed in 4.3; this corresponds to our unified model with settings M = 0, M = 0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Models.", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Among the compared models, Unified-01 (the naive model) and MTM/MTM-Vote are either simple or commonly used methods and thus are treated as baselines. Unified-00 is a novel approach. Although Greenberg et al. (2018) used the approach of Unified-11, they only evaluated the model on training corpora/tasks while we apply it for task adaptation. Moreover, it is a special case of our proposed framework, thus we argue that people can simply tune M and M to get good performance for adaptations to new tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 215, |
|
"text": "Greenberg et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Models.", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "As mentioned above, we compare the results of four different approaches in no-supervision and limited-supervision settings, both with real-world biomedical data and synthetic news data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "As a sanity check, we also evaluate the models on the test sets of the training corpora. The results can be found in Appendix A. Table 2 : Results for task adaptation in the no-supervision setting. The best f1 score in each column that is significantly better than the second best is bold-faced, while those are better but not significantly are underlined. All the significance tests are conducted using mention-level McNemar's Chi-square test, with p-value = 0.01. Figure 4: Plot of f1 scores for task adaptation in the limited-supervision setting. X-axis represents the number of sentences used for fine-tuning. STM(2k) is a STM trained on 2k sentences sampled from the global corpus, and STM(all) is trained on the entire training set of the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 136, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "our MTM performs comparably with state-of-theart systems evaluated on the training corpora, and thus is a strong baseline. Table 2 demonstrates the results for task adaptation in the no-supervision setting. We report precision and recall in addition to f1 scores to better show the differences between the models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 130, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Comparing on f1 scores, Unified-00 (our new model) significantly outperforms all other models on three out of four datasets, demonstrating its effectiveness. Unified-11 also achieves good results, with higher recall but lower precision than Unified-00. This aligns well with our hypothesis that it encourages predictions of entities. Conversely, Unified-01 (the naive approach) achieves the highest precision but lowest recall, which is reasonable considering the problem of false penalties that discourages the model from predicting en-tities. We also found that the model achieves better performance when M = M , which supports our hypothesis in 4.5 that the model works better with a valid likelihood function.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "No-Supervision Setting", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "To further demonstrate the models' ability to adapt to new datasets with a small amount of supervision, we sample a small subset of the training portion of each global evaluation corpus to fine-tune the trained models. We show the performance of the models fine-tuned with different amounts of sampled data. For each global corpus, we show a single-task model (STM) trained on it with a reasonable amount of data (two thousand sentences for the biomedical corpora). In the CoNLL 2003 setting, we train the STM on the entire training data for a fair comparison, because all other models are first trained on the four training portions, which essentially look through the entire training set (just partially annotated). The results of the STMs are used as benchmarks. Experimental results are presented in Figure 4 . Firstly, with much less training data, all the models achieve comparable or noticeably better performance than the STMs trained from scratch, demonstrating that training on the partially annotated corpora does help to boost performance on global evaluation corpora. Additionally, MTMs are worse than all the unified models, because they only share the LSTM layers, but lose all the knowledge in the CRFs when adapted to new corpora. The unified models have the advantage that they can reuse the robust CRFs learned from a large amount of data. This is more obvious in the CoNLL 2003 evaluation setting, where the unified models that reuse the pre-trained CRFs achieve good performance trained with only 50 sentences, but the MTM, which does not reuse the CRFs, needs a larger amount of training data to catch up.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 804, |
|
"end": 812, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Limited-Supervision Setting", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In general, Unified-00, our novel approach proposed here, still performs the best on every dataset. We note that although Unified-01 has an extremely low recall on the CoNLL 2003 dataset in the no-supervision setting, it works surprisingly well in the limited-supervision setting. On the other hand, Unified-00 and Unified-11 generally perform better than Unified-01 on real-world biomedical datasets, especially when fine-tuned on less data. Again, since all the unified models are special cases of our proposed framework, we argue that, for adapting to new datasets, people can simply tune the discounting factors M and M to get good results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limited-Supervision Setting", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a unified model that learns from multiple partially annotated datasets to make joint predictions on the union of entity types appearing in any training dataset. The model integrates learning signals from different datasets and avoids potential conflicts that would result from combining independent predictions from multiple models. Experiments show that the proposed unified model can efficiently adapt to new corpora that have more entity types than any of the training corpora, and performs better than the baseline approaches.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In future work, we plan to explore other algorithms (e.g. imitation learning) that allow the model the explore the unknown space during training, using delayed rewards to decide whether the model should trust its exploration. Analysis of the global evaluation results suggests that the unified model is under-predicting, meaning there is still room for improvement specifically on recall. We plan to explore further changes to the current objectives to encourage more entity predictions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Finally, the approach proposed in this paper also does not handle entity types of varying granularities or tagsets with mismatched guidelines (e.g. one dataset annotates only for-profit companies as ORG and one annotates all formalized groups). Effectively modeling these complications is an interesting area for future work. Table 4 : Statistics for global evaluation corpora. \"Others\" denote the NEs which do not appeared in training data, thus are not evaluated.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 333, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Below we introduce the datasets in the biomedicine domain and the news domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Datasets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The training group consists of five datasets: BC2GM, BC4CHEM, NCBI-disease, JNLPBA, and Linnaeus. The first two datasets are from different BioCreative shared tasks (Smith et al., 2008; Krallinger et al., 2015; . NCBI-disease is created by Dogan et al. (2014) for disease name recognition and normalization. JNLPBA comes from the 2004 shared task from joint workshop on natural language processing in biomedicine and its applications (Kim et al., 2004) , and Linnaeus is a species corpus composed by Gerner et al. (2010) . More information about the datasets can be found in Table 3 . Below are detailed descriptions of the datasets: BC2GM is a gene/protein corpus. The annotation is Gene. It's provided by the BioCreative II Shared Task for gene mention recognition.", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 185, |
|
"text": "(Smith et al., 2008;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 210, |
|
"text": "Krallinger et al., 2015;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 452, |
|
"text": "(Kim et al., 2004)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 500, |
|
"end": 520, |
|
"text": "Gerner et al. (2010)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 575, |
|
"end": 582, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.1.1 Biomedicine domain: Local training group", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "BC4CHEM is a chemical corpus. The annotation is Chemical. It's provided by the BioCreative IV Shared Task for chemical mention recognition. Linnaeus is a species corpus. The annotation is Species. The original project was created for entity mention recognition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1.1 Biomedicine domain: Local training group", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We reemphasize here that the purpose of the global evaluation is to test the model's ability to making global predictions and efficiently adapt to global corpora. While no corpus is globally annotated, we identify several existing corpora to approximate the global evaluation. Each test corpus is annotated with a superset of several training corpora to test the model's generalizability outside of the local tag spaces. The global evaluation group contains three datasets: BC5CDR, BioNLP13CG, and BioNLP11ID. Each is annotated with multiple entity types. BC5CDR comes from the BioCreative shared tasks (Smith et al., 2008; Krallinger et al., 2015; . BioNLP13CG and BioNLP11ID come from the BioNLP shared task (Kim et al., 2013) . More information about the global evaluation datasets can be found in Table 4 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 603, |
|
"end": 623, |
|
"text": "(Smith et al., 2008;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 648, |
|
"text": "Krallinger et al., 2015;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 710, |
|
"end": 728, |
|
"text": "(Kim et al., 2013)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 801, |
|
"end": 808, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.1.2 Biomedicine domain: Global evaluation group", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Below are detailed descriptions of the datasets: BC5CDR is a chemical and disease corpus. The annotation is Chemical and Disease. It's provided by BioCreative V Shared Task for chemical and disease mention recognition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1.2 Biomedicine domain: Global evaluation group", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "BioNLP13CG consists of Gene/Protein and Related Product, Cancel, Chemical, Anatomy and Organism and others. BioNLP11ID consists of Gene/Protein, Chemical, and Organism. The annotation is same as the NE types but has a finer ontology scope. Table 6 : Local evaluation (f1 scores). The best results that are significantly better than the second best are boldfaced, while those are best but not significantly better than the second best are underlined. All the significance tests are conducted using mention-level McNemar's Chi-square test, with p-value = 0.01.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 247, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.1.2 Biomedicine domain: Global evaluation group", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "There are inconsistencies between the entity type names in different datasets, mainly due to different granularities. To remove this unnecessary noise, we manually merged some entity types. For example, we unify Gene and Protein into Gene/Protein as they are commonly used interchangeably; we merge \"Simple Chemical\" to \"Chemical\" and leave the problem of entity type granularity for future work. The information in Table 3 and 4 reflects the merged types.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 416, |
|
"end": 423, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.1.2 Biomedicine domain: Global evaluation group", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the CoNLL 2003 NER dataset ( ) to evaluate the models in news domain. More information about the dataset can be found in Table 5 . We use synthetic data from the dataset to simulate local training and global evaluation. Specifically, the CoNLL 2003 NER dataset is annotated with four entity types: location, person, organization, and miscellaneous entities. We randomly split the training set into four portions, each contains only one entity type respectively, with other types changed to \"O\". The models are trained on the four training portions and we test on the original test set with all entity types annotated.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 135, |
|
"text": "Table 5", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.1.3 News domain: CoNLL 2003 NER dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For the news domain, we use the default train, dev, test portion of the CoNLL 2003 NER dataset. For the biomedicine domain, we follow the data split in Crichton et al. (2017) for both the training and the evaluation groups. All datasets are divided into three portions: train, dev, and test. We train the model on the training set of the training group and tune the hyper-parameters on the corresponding development set. Global evaluations are performed on the test set of the evaluation group.", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 174, |
|
"text": "Crichton et al. (2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1.4 Data split", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For a sanity check, we evaluate the models on the training corpora and compare the results with state-of-the-art systems. In this setting, all the models are trained on the training set of the training corpora (without fine-tuning on global evaluation corpora) and evaluated on their test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Local Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The results are shown in Table 6 . STM is the single-task models we implemented, following the settings in Wang et al. (2018) . The SOTA is achieved by Wang et al. (2018) with multi-task model, which is shown in the table as MTM Wang et al. (2018) . They trained their model on BC2GC, BC4CHM, NCBI, JNLPBA, and BC5CDR. MTM (ours) is the multi-task model we trained on our five training corpora and used as a baseline in the global evaluations. It has the same architecture as Wang et al. (2018) . As we can see, MTM Wang et al. (2018) achieves the best results on 3 out of 4 datasets. And our MTM achieves very similar results, showing it is a strong model on training corpora. Our proposed models do not perform very well when evaluated on the training corpora. But in the global evaluation setting, they perform much better compared to our strong MTM. This demonstrates the superiority of our proposed models on task adaptation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 125, |
|
"text": "Wang et al. (2018)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 152, |
|
"end": 170, |
|
"text": "Wang et al. (2018)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 247, |
|
"text": "MTM Wang et al. (2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 494, |
|
"text": "Wang et al. (2018)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 512, |
|
"end": 534, |
|
"text": "MTM Wang et al. (2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 32, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.2 Local Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://corposaurus.github.io/ corpora/ summarizes dozens of partially annotated biomedical datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We further verified this conclusion by computing the heat maps on the original datasets. The overlaps between BC2GM and BC4CHEMD, and BC2GM and Linnaeus are nearly 0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A lower recall and f1 score was observed in the initial experiment without this heuristic.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank the anonymous reviewers for their constructive comments, as well as the members of the USC PLUS lab for their early feedbacks. We thank Tianyu Meng and Yuxin Zhou for their help with initial data processing and experimental setup. This work is supported in part by DARPA (HR0011-15-C-0115) and an NIH R01 (LM012592). Approved for Public Release, Distribution Unlimited. The views expressed are those of the authors and do not reflect the official policy or position of the sponsors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Named Entities Sents", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A comparison of the events and relations across ACE, ERE, TAC-KBP, and FrameNet annotation standards", |
|
"authors": [ |
|
{ |
|
"first": "Jacqueline", |
|
"middle": [], |
|
"last": "Aguilar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charley", |
|
"middle": [], |
|
"last": "Beller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Mcnamee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Durme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Strassel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyi", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Ellis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ACL Workshop: EVENTS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacqueline Aguilar, Charley Beller, Paul McNamee, Ben V. Durme, Stephanie Strassel, Zhiyi Song, and Joe Ellis. 2014. A comparison of the events and re- lations across ACE, ERE, TAC-KBP, and FrameNet annotation standards. In ACL Workshop: EVENTS.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Learning extractors from unlabeled text using relevant databases", |
|
"authors": [ |
|
{ |
|
"first": "Kedar", |
|
"middle": [], |
|
"last": "Bellare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Sixth international workshop on information integration on the web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kedar Bellare and Andrew McCallum. 2007. Learn- ing extractors from unlabeled text using relevant databases. In Sixth international workshop on in- formation integration on the web.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning long-term dependencies with gradient descent is difficult", |
|
"authors": [ |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrice", |
|
"middle": [], |
|
"last": "Simard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Frasconi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "IEEE Transactions on Neural Networks", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoshua Bengio, Patrice Simard, and Paolo Frasconi. 1994. Learning long-term dependencies with gradi- ent descent is difficult. IEEE Transactions on Neu- ral Networks.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Learning a named entity tagger from gazetteers with the partial perceptron", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Carlson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Gaffney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Flavian", |
|
"middle": [], |
|
"last": "Vasile", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "AAAI Spring Symposium: Learning by Reading and Learning to Read", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Carlson, Scott Gaffney, and Flavian Vasile. 2009. Learning a named entity tagger from gazetteers with the partial perceptron. In AAAI Spring Symposium: Learning by Reading and Learning to Read.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "DEFT ERE annotation guidelines: Relations v1.1. Linguistic Data Consortium", |
|
"authors": [], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linguistic Data Consortium. 2013. DEFT ERE anno- tation guidelines: Relations v1.1. Linguistic Data Consortium, Philadelphia.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Constructing biological knowledge bases by extracting information from text sources", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Craven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Kumlien", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "ISMB", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Craven and Johan Kumlien. 1999. Constructing biological knowledge bases by extracting informa- tion from text sources. In ISMB.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Learning to extract symbolic knowledge from the world wide web", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Craven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Pipasquo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dayne", |
|
"middle": [], |
|
"last": "Freitag", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Craven, Andrew McCallum, Dan PiPasquo, Tom Mitchell, and Dayne Freitag. 1998. Learning to ex- tract symbolic knowledge from the world wide web. Technical report, Carnegie-mellon univ pittsburgh pa school of computer Science.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A neural network multi-task learning approach to biomedical named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Gamal", |
|
"middle": [], |
|
"last": "Crichton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Billy", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "BMC bioinformatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gamal Crichton, Sampo Pyysalo, Billy Chiu, and Anna Korhonen. 2017. A neural network multi-task learn- ing approach to biomedical named entity recogni- tion. BMC bioinformatics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Ncbi disease corpus: a resource for disease name recognition and concept normalization", |
|
"authors": [

{

"first": "Rezarta",

"middle": [

"Islamaj"

],

"last": "Dogan",

"suffix": ""

},

{

"first": "Robert",

"middle": [],

"last": "Leaman",

"suffix": ""

},

{

"first": "Zhiyong",

"middle": [],

"last": "Lu",

"suffix": ""

}

],
|
"year": 2014, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rezarta Islamaj Dogan, Robert Leaman, and Zhiyong Lu. 2014. Ncbi disease corpus: a resource for dis- ease name recognition and concept normalization. Journal of biomedical informatics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Learning from partially annotated sequences", |
|
"authors": [

{

"first": "Eraldo",

"middle": [

"R"

],

"last": "Fernandes",

"suffix": ""

},

{

"first": "Ulf",

"middle": [],

"last": "Brefeld",

"suffix": ""

}

],
|
"year": 2011, |
|
"venue": "Joint European Conference on Machine Learning and Knowledge Discovery in Databases", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "407--422", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eraldo R Fernandes and Ulf Brefeld. 2011. Learn- ing from partially annotated sequences. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pages 407-422. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Linnaeus: a species name identification system for biomedical literature", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Gerner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Nenadic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Casey", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bergman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "BMC bioinformatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Gerner, Goran Nenadic, and Casey M Bergman. 2010. Linnaeus: a species name identification sys- tem for biomedical literature. BMC bioinformatics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Marginal likelihood training of bilstm-crf for biomedical named entity recognition from disjoint label sets", |
|
"authors": [ |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Greenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trapit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Verga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2824--2829", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nathan Greenberg, Trapit Bansal, Patrick Verga, and Andrew McCallum. 2018. Marginal likelihood training of bilstm-crf for biomedical named entity recognition from disjoint label sets. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2824-2829.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Discovering relations among named entities from large corpora", |
|
"authors": [ |
|
{ |
|
"first": "Takaaki", |
|
"middle": [], |
|
"last": "Hasegawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Seki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takaaki Hasegawa, Satoshi Seki, and Ralph Grishman. 2004. Discovering relations among named entities from large corpora. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Overview of the tac 2010 knowledge base population track", |
|
"authors": [ |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"Trang" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kira", |
|
"middle": [], |
|
"last": "Griffitt", |
|
"suffix": "" |
|
}, |
|
{

"first": "Joe",

"middle": [],

"last": "Ellis",

"suffix": ""

}
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heng Ji, Ralph Grishman, Hoa Trang Dang, Kira Grif- fitt, and Joe Ellis. 2010. Overview of the tac 2010 knowledge base population track. In TAC.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Introduction to the bio-entity recognition task at jnlpba", |
|
"authors": [ |
|
{ |
|
"first": "Jin-Dong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoko", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshimasa", |
|
"middle": [], |
|
"last": "Tsuruoka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuka", |
|
"middle": [], |
|
"last": "Tateisi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nigel", |
|
"middle": [], |
|
"last": "Collier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jin-Dong Kim, Tomoko Ohta, Yoshimasa Tsuruoka, Yuka Tateisi, and Nigel Collier. 2004. Introduc- tion to the bio-entity recognition task at jnlpba. In JNLPBA. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "The genia event extraction shared task, 2013 edition-overview", |
|
"authors": [ |
|
{ |
|
"first": "Jin-Dong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yamamoto", |
|
"middle": [], |
|
"last": "Yasunori", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "BioNLP Shared Task 2013 Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jin-Dong Kim, Yue Wang, and Yamamoto Yasunori. 2013. The genia event extraction shared task, 2013 edition-overview. In BioNLP Shared Task 2013 Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Chemdner: The drugs and chemical names extraction challenge", |
|
"authors": [

{

"first": "Martin",

"middle": [],

"last": "Krallinger",

"suffix": ""

},

{

"first": "Florian",

"middle": [],

"last": "Leitner",

"suffix": ""

},

{

"first": "Obdulia",

"middle": [],

"last": "Rabal",

"suffix": ""

},

{

"first": "Miguel",

"middle": [],

"last": "Vazquez",

"suffix": ""

},

{

"first": "Julen",

"middle": [],

"last": "Oyarzabal",

"suffix": ""

},

{

"first": "Alfonso",

"middle": [],

"last": "Valencia",

"suffix": ""

}

],
|
"year": 2015, |
|
"venue": "Journal of Cheminfo", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Krallinger, Florian Leitner, Obdulia Rabal, Miguel Vazquez, Julen Oyarzabal, and Alfonso Va- lencia. 2015. Chemdner: The drugs and chemical names extraction challenge. Journal of Cheminfo.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando",

"middle": [

"CN"

],
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lafferty, Andrew McCallum, and Fernando CN Pereira. 2001. Conditional random fields: Prob- abilistic models for segmenting and labeling se- quence data. In ICML.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Neural architectures for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandeep", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuya", |
|
"middle": [], |
|
"last": "Kawakami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Miguel Ballesteros, Sandeep Sub- ramanian, Kazuya Kawakami, and Chris Dyer. 2016. Neural architectures for named entity recognition. In NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Learning from positive and unlabeled examples with different data distributions", |
|
"authors": [ |
|
{ |
|
"first": "Xiao-Li", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "ECML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao-Li Li and Bing Liu. 2005. Learning from positive and unlabeled examples with different data distribu- tions. In ECML.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Empower Sequence Labeling with Task-Aware Neural Language Model", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Gui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Liu, J. Shang, F. Xu, X. Ren, H. Gui, J. Peng, and J. Han. 2018. Empower Sequence Labeling with Task-Aware Neural Language Model. In AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Domain adaptation for crf-based chinese word segmentation using free annotations", |
|
"authors": [ |
|
{ |
|
"first": "Yijia", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanxiang", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "864--874", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1093" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yijia Liu, Yue Zhang, Wanxiang Che, Ting Liu, and Fan Wu. 2014. Domain adaptation for crf-based chinese word segmentation using free annotations. In Proceedings of the 2014 Conference on Em- pirical Methods in Natural Language Processing (EMNLP), pages 864-874. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Chemdner system with mixed conditional random fields and multi-scale word clustering", |
|
"authors": [ |
|
{ |
|
"first": "Yanan", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghong", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyuan", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaomei", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohui", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of cheminformatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanan Lu, Donghong Ji, Xiaoyuan Yao, Xiaomei Wei, and Xiaohui Liang. 2015. Chemdner system with mixed conditional random fields and multi-scale word clustering. Journal of cheminformatics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "End-to-end sequence labeling via bi-directional lstm-cnns-crf", |
|
"authors": [ |
|
{ |
|
"first": "Xuezhe", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuezhe Ma and Eduard Hovy. 2016. End-to-end se- quence labeling via bi-directional lstm-cnns-crf. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Early results for named entity recognition with conditional random fields, feature induction and web-enhanced lexicons", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew McCallum and Wei Li. 2003. Early results for named entity recognition with conditional random fields, feature induction and web-enhanced lexicons. In NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Subsequence kernels for relation extraction", |
|
"authors": [

{

"first": "Raymond",

"middle": [

"J"

],

"last": "Mooney",

"suffix": ""

},

{

"first": "Razvan",

"middle": [

"C"

],

"last": "Bunescu",

"suffix": ""

}

],
|
"year": 2005, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raymond J Mooney and Razvan C Bunescu. 2005. Subsequence kernels for relation extraction. In NIPS.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Annotating and evaluating text for stem cell research", |
|
"authors": [ |
|
{ |
|
"first": "Mariana", |
|
"middle": [], |
|
"last": "Neves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Damas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Kurtz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulf", |
|
"middle": [], |
|
"last": "Leser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "BioTxtM workshop at LREC on Building and Evaluation Resources", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mariana Neves, Alexander Damas, Andreas Kurtz, and Ulf Leser. 2012. Annotating and evaluating text for stem cell research. In BioTxtM workshop at LREC on Building and Evaluation Resources.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Named entity recognition for chinese social media with jointly trained embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nanyun Peng and Mark Dredze. 2015. Named en- tity recognition for chinese social media with jointly trained embeddings. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), Lisboa, Portugal.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Improving named entity recognition for chinese social media via learning segmentation representations", |
|
"authors": [ |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nanyun Peng and Mark Dredze. 2016. Improving named entity recognition for chinese social media via learning segmentation representations. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Multi-task domain adaptation for sequence tagging", |
|
"authors": [ |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the ACL Workshop on Representation Learning for NLP",
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nanyun Peng and Mark Dredze. 2017. Multi-task domain adaptation for sequence tagging. In Pro- ceddings of the ACL Workshop on Representation Learning for NLP.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Anatomical entity mention recognition at literature scale", |
|
"authors": [ |
|
{ |
|
"first": "Sampo", |
|
"middle": [], |
|
"last": "Pyysalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Ananiadou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Bioinformatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sampo Pyysalo and Sophia Ananiadou. 2013. Anatomical entity mention recognition at literature scale. Bioinformatics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Named entity recognition in tweets: an experimental study", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Ritter, Sam Clark, Oren Etzioni, et al. 2011. Named entity recognition in tweets: an experimental study. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Introduction to the conll-2003 shared task: Language-independent named entity recognition",
|
"authors": [

{

"first": "Erik",

"middle": [

"F"

],

"last": "Sang",

"suffix": ""

},

{

"first": "Fien",

"middle": [],

"last": "De Meulder",

"suffix": ""

}

],
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F Sang and Fien De Meulder. 2003. Intro- duction to the conll-2003 shared task: Language- independent named entity recognition. arXiv preprint cs/0306050.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Learning named entity tagger using domain-specific dictionary", |
|
"authors": [ |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaotao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1809.03599" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingbo Shang, Liyuan Liu, Xiang Ren, Xiaotao Gu, Teng Ren, and Jiawei Han. 2018. Learning named entity tagger using domain-specific dictio- nary. arXiv preprint arXiv:1809.03599.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Overview of biocreative ii gene mention recognition", |
|
"authors": [

{

"first": "Larry",

"middle": [],

"last": "Smith",

"suffix": ""

},

{

"first": "Lorraine",

"middle": [

"K"

],

"last": "Tanabe",

"suffix": ""

},

{

"first": "Rie",

"middle": [],

"last": "Johnson Nee Ando",

"suffix": ""

},

{

"first": "Cheng-Ju",

"middle": [],

"last": "Kuo",

"suffix": ""

},

{

"first": "I-Fang",

"middle": [],

"last": "Chung",

"suffix": ""

},

{

"first": "Chun-Nan",

"middle": [],

"last": "Hsu",

"suffix": ""

},

{

"first": "Yu-Shi",

"middle": [],

"last": "Lin",

"suffix": ""

},

{

"first": "Roman",

"middle": [],

"last": "Klinger",

"suffix": ""

},

{

"first": "Christoph",

"middle": [

"M"

],

"last": "Friedrich",

"suffix": ""

},

{

"first": "Kuzman",

"middle": [],

"last": "Ganchev",

"suffix": ""

}

],
|
"year": 2008, |
|
"venue": "Genome biology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Larry Smith, Lorraine K Tanabe, Rie Johnson nee Ando, Cheng-Ju Kuo, I-Fang Chung, Chun-Nan Hsu, Yu-Shi Lin, Roman Klinger, Christoph M Friedrich, Kuzman Ganchev, et al. 2008. Overview of biocreative ii gene mention recognition. Genome biology.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "A machine learning approach to coreference resolution of noun phrases", |
|
"authors": [], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wee Meng Soon, Hwee Tou Ng, and Daniel Chung Yong Lim. 2001. A machine learning ap- proach to coreference resolution of noun phrases. Computational linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Introduction to the conll-2003 shared task: Language-independent named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Erik F Tjong Kim", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "CoNLL at HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task: Language-independent named entity recognition. In CoNLL at HLT-NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Training conditional random fields using incomplete annotations", |
|
"authors": [ |
|
{ |
|
"first": "Yuta", |
|
"middle": [], |
|
"last": "Tsuboi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hisashi", |
|
"middle": [], |
|
"last": "Kashima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shinsuke", |
|
"middle": [], |
|
"last": "Mori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroki", |
|
"middle": [], |
|
"last": "Oda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuji", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 22nd International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "897--904", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuta Tsuboi, Hisashi Kashima, Shinsuke Mori, Hiroki Oda, and Yuji Matsumoto. 2008. Training condi- tional random fields using incomplete annotations. In Proceedings of the 22nd International Conference on Computational Linguistics (Coling 2008), pages 897-904. Coling 2008 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Ace 2005 multilingual training corpus. LDC", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Strassel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Medero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuaki", |
|
"middle": [], |
|
"last": "Maeda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Walker, Stephanie Strassel, Julie Medero, and Kazuaki Maeda. 2006. Ace 2005 multilingual training corpus. LDC.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Cross-type biomedical named entity recognition with deep multi-task learning", |
|
"authors": [ |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marinka", |
|
"middle": [], |
|
"last": "Zitnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Curtis", |
|
"middle": [], |
|
"last": "Langlotz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuan Wang, Yu Zhang, Xiang Ren, Yuhao Zhang, Marinka Zitnik, Jingbo Shang, Curtis Langlotz, and Jiawei Han. 2018. Cross-type biomedical named entity recognition with deep multi-task learning. CoRR.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Overview of the biocreative v chemical disease relation (cdr) task", |
|
"authors": [ |
|
{ |
|
"first": "Chih-Hsuan", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Leaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Allan", |
|
"middle": [ |
|
"Peter" |
|
], |
|
"last": "Davis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolyn", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mattingly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{

"first": "Thomas",

"middle": [

"C"

],

"last": "Wiegers",

"suffix": ""

},

{

"first": "Zhiyong",

"middle": [],

"last": "Lu",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "BC V Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chih-Hsuan Wei, Yifan Peng, Robert Leaman, Al- lan Peter Davis, Carolyn J Mattingly, Jiao Li, Thomas C Wiegers, and Zhiyong Lu. 2015. Overview of the biocreative v chemical disease re- lation (cdr) task. In BC V Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Semi-supervised chinese word segmentation using partial-label learning with conditional random fields", |
|
"authors": [ |
|
{ |
|
"first": "Fan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Vozila", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fan Yang and Paul Vozila. 2014. Semi-supervised chi- nese word segmentation using partial-label learning with conditional random fields. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Distantly supervised ner with partial annotation learning and reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Yaosheng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenliang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenghua", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengqiu", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2159--2169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yaosheng Yang, Wenliang Chen, Zhenghua Li, Zhengqiu He, and Min Zhang. 2018. Distantly su- pervised ner with partial annotation learning and re- inforcement learning. In Proceedings of the 27th In- ternational Conference on Computational Linguis- tics, pages 2159-2169.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "The wordlevel BiLSTM then takes the concatenation of h c i and the word embedding as input x i = [e i ; h c i ] to learn contextualized representations.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Original likelihood function with global annotation (b) Original likelihood function with partial annotation (c) Improved likelihood function with partial annotation", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Illustration of original (2a, 2b) and improved (2c) likelihood functions. Each figure has two parts upper and lower that illustrate the gold energy (numerator) and the partition (denominator) respectively. Solid lines represent tag sequences that are fully considered in the functions. Dashed lines represent tag sequences that are discounted. The sentences in 2b and 2c are not annotated with LOC.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"text": "(a) The mention-level overlap among training sets. (b) The mention-level overlap between training datasets and evaluation datasets.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"text": "tuned both discounting factors M and M in the range of [0,0.2,0.4,0.6,0.8,1.0]. It turns out that M = 0, M = 0 (using improved partition function) and M = 1, M = 1 (using improved gold energy function) make two local optimums. Therefore we report the performance of three special cases of our proposed framework, with M, M = [0, 0], [1, 1], and [0, 1] (the naive model), respectively.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF2": { |
|
"text": "Vote 63.6 64.4 62.8 61.0 56.7 65.9 50.4 44.8 57.5 83.9 88.4 79.8 Unified-01 42.7 93.7 27.6 37.5 72.5 25.3 23.6 50.8 15.4 01.6 97.8 00.8 Unified-11 70.2 73.8 67.0 67.7 64.0 71.9 53.2 47.1 61.1 80.1 84.6 76.1 Unified-00 73.8 84.1 65.7 69.7 68.1 71.5 52.7 49.4 56.5 84.8 90.0 80.2", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td/><td colspan=\"7\">Trained on Other Biomedical Datasets</td><td/><td colspan=\"3\">Traind on CoNLL</td></tr><tr><td>Corpus</td><td>BC5CDR</td><td/><td colspan=\"3\">BioNLP13CG</td><td colspan=\"3\">BioNLP11ID</td><td/><td colspan=\"2\">CoNLL 2003</td></tr><tr><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td></tr><tr><td>MTM-</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"3\">2. It is shown that</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"text": "Statistics for the CoNLL 2003 NER datasetNCBI-disease is a disease corpus. The annotation is Disease. It was introduced for disease name recognition and normalization.", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>JNLPBA</td><td>consists</td><td>of</td><td>DNA,</td><td>RNA,</td></tr><tr><td colspan=\"5\">Gene/Protein, Cell line, Cell Type. The an-</td></tr><tr><td colspan=\"5\">notation is same as the NE names, except the</td></tr><tr><td colspan=\"5\">Gene/Protein is annotated with Protein. It was</td></tr><tr><td colspan=\"5\">provided by 2004 JNLPBA Shared Task for</td></tr><tr><td colspan=\"3\">biomedical entity recognition.</td><td/><td/></tr></table>" |
|
} |
|
} |
|
} |
|
} |