|
{ |
|
"paper_id": "Q15-1036", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:07:28.393120Z" |
|
}, |
|
"title": "Plato: A Selective Context Model for Entity Resolution", |
|
"authors": [ |
|
{ |
|
"first": "Nevena", |
|
"middle": [], |
|
"last": "Lazic", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Google Inc", |
|
"location": { |
|
"postCode": "94043", |
|
"settlement": "Mountain View", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Amarnag", |
|
"middle": [], |
|
"last": "Subramanya", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Google Inc", |
|
"location": { |
|
"postCode": "94043", |
|
"settlement": "Mountain View", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Ringgaard", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Google Inc", |
|
"location": { |
|
"postCode": "94043", |
|
"settlement": "Mountain View", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Google Inc", |
|
"location": { |
|
"postCode": "94043", |
|
"settlement": "Mountain View", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present Plato, a probabilistic model for entity resolution that includes a novel approach for handling noisy or uninformative features, and supplements labeled training data derived from Wikipedia with a very large unlabeled text corpus. Training and inference in the proposed model can easily be distributed across many servers, allowing it to scale to over 10 7 entities. We evaluate Plato on three standard datasets for entity resolution. Our approach achieves the best results to-date on TAC KBP 2011 and is highly competitive on both the CoNLL 2003 and TAC KBP 2012 datasets.", |
|
"pdf_parse": { |
|
"paper_id": "Q15-1036", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present Plato, a probabilistic model for entity resolution that includes a novel approach for handling noisy or uninformative features, and supplements labeled training data derived from Wikipedia with a very large unlabeled text corpus. Training and inference in the proposed model can easily be distributed across many servers, allowing it to scale to over 10 7 entities. We evaluate Plato on three standard datasets for entity resolution. Our approach achieves the best results to-date on TAC KBP 2011 and is highly competitive on both the CoNLL 2003 and TAC KBP 2012 datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Given a document collection and a knowledge base (KB) of entities, entity resolution, also known as entity disambiguation or entity linking, is the process of mapping each entity mention in a document to the corresponding entity record in the KB (Bunescu and Pasca, 2006; Cucerzan, 2007; Dredze et al., 2010; Hachey et al., 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 271, |
|
"text": "(Bunescu and Pasca, 2006;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 287, |
|
"text": "Cucerzan, 2007;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 308, |
|
"text": "Dredze et al., 2010;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 329, |
|
"text": "Hachey et al., 2013)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Entity resolution is challenging because referring expressions are often ambiguous on their own and can only be disambiguated by their surrounding context. Consider the name Newcastle; it can refer to the city of Newcastle upon Tyne in UK, to the football (soccer for US readers) club Newcastle United F.C., to a popular beverage (Newcastle Brown Ale), or to several other entities. The ambiguity can only be resolved with appropriate context. Another complicating factor is that no KB is complete, and so a name in a document may refer to an entity that is missing from the KB. This problem is commonly called NIL detection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper we present a probabilistic model for entity resolution. Our system, hereafter referred to as Plato, is designed to be resilient to irrelevant features and can be seen as a selective extension of the na\u00efve Bayes model. Specifically, we assume that most of the context features of a mention are irrelevant to its disambiguation. This contrasts with the na\u00efve Bayes assumption that all features are generated from a class-conditional distribution and are thus all relevant to the class assignment. Our empirical results support this modeling choice. We train Plato in a semi-supervised manner, starting with labeled data derived from Wikipedia, and continuing with a very large unlabeled corpus of Web documents. The use of unlabeled data enables us to obtain a better estimate of feature distributions and discover new features that are not present in the (labeled) training data. Plato scales up easily to very large KBs with millions of entities and includes NIL detection as a natural by-product of inference. We named our system after the Greek philosopher because the system's inference of real underlying entities from imperfect evidence reminds us of Plato's Theory of Forms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous entity resolution studies (Milne and Witten, 2008; Cucerzan, 2007; Ratinov et al., 2011; Hoffart et al., 2011; Hachey et al., 2013) have typically relied on three main components: a mention model, a context model, and a coherency model. The mention model, perhaps the most important component (Hachey et al., 2013) , estimates the prior belief that a particular phrase refers to a particular entity 503 Transactions of the Association for Computational Linguistics, vol. 3, pp. 503-515, 2015. Action Editor: Noah Smith.", |
|
"cite_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 59, |
|
"text": "(Milne and Witten, 2008;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 60, |
|
"end": 75, |
|
"text": "Cucerzan, 2007;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 76, |
|
"end": 97, |
|
"text": "Ratinov et al., 2011;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 98, |
|
"end": 119, |
|
"text": "Hoffart et al., 2011;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 140, |
|
"text": "Hachey et al., 2013)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 323, |
|
"text": "(Hachey et al., 2013)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Submission batch: 4/2015; Revision batch: 6/2015; Published 9/2015. c 2015 Association for Computational Linguistics. Distributed under a CC-BY 4.0 license. in the KB. In addition to providing a prior, the mention model also helps efficient inference by giving zero probability to entities that are never referred to by a particular name. The context model helps disambiguate the entity using the textual context of the mention. This includes both features extracted from the immediate context (such as the enclosing sentence) and from the overall discourse (such as the most salient noun phrases in the document). Finally, the coherency model encourages all referring expressions in a document to resolve to entities that are related to each other in the KB. For example, a mention of Sunderland A.F.C. (a rival football club to Newcastle United F.C.) may reduce the uncertainty about the mention Newcastle. Since a coherency model introduces dependencies between the resolutions of all the mentions in a document, it is seen as a global model, while mention and context models are usually referred to as local (Ratinov et al., 2011) . Coherency models typically have an increased inference cost, as they require access to the relevant entity relationships in the KB.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1112, |
|
"end": 1134, |
|
"text": "(Ratinov et al., 2011)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Plato does not include a full coherency component. Instead, mentions in a given document are sorted into coreference clusters by a simple within-document coreference algorithm similar to that of Haghighi and Klein (2009) . Each coreference cluster is then resolved to the KB independently of the resolution of the other clusters in the document. The context features for each mention cluster in our model include the names of other referring phrases in the document. Since many referring phrases are unambiguous, our hypothesis is that such context can capture much of the discourse coherence usually represented by a coherency model. Plato detects and links both nominal and named mentions, but following previous work, we evaluate it only on the resolution of gold named entity mentions to either KB or NIL.", |
|
"cite_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 220, |
|
"text": "Haghighi and Klein (2009)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We train Plato with expectation-maximization (EM), which is easily parallelizable and thus can scale up to very large KBs and unlabeled training corpora. Indeed, our efficient distributed implementation allows the system to scale up to KBs with over 10 7 entities. Plato achieves highly competitive results on several benchmarks: CoNLL 2003, TAC 2012 KBP, and TAC 2011 KBP. Most importantly, this performance is \"out-of-the-box\": we did not use any of the corresponding training sets, labeled or not, to train the model or tune hyperparameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We are given a document collection where all the candidate entity mentions have been identified. Each mention is characterized by its phrase, and by the document context. The context is abstracted as a (multi)set of features that includes phrases related to the mention by linear order, syntax, or withindocument coreference and phrases extracted from the whole enclosing document. Context features for a mention depend only on the document text, and not on the results of the entity resolution for other mentions. Therefore, we can treat each mention (more strictly speaking, each coreference cluster) independently. Finally, we are given the set E of KB entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Definitions and Notation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "When discussing probabilistic models, we use uppercase for random variables, lowercase for the values they take, and boldface for vectors. We use w m to represent the phrase of mention m. The context of mention m is represented either as a binary feature presence vector b m \u2208 {0, 1} |F | , or as a feature count vector c m \u2208 N |F | . The random variable E m \u2208 E ranges over the possible candidate entities for mention m. We provide more details on how the candidates are obtained in Section 8.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Definitions and Notation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We start with a na\u00efve Bayes model that will serve as a source of intuition and an evaluation baseline. In this model, the phrase and context of a mention are conditionally independent given the entity:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(e|c, w) \u221d p(e)p(w|e)p(c|e).", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "If we assume that the set of feature counts is drawn from the multinomial distribution then", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "p(c|e, \u03b8 e ) = ( k c k )! c 1 ! . . . c |F | ! k \u03b8 e,k c k ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where \u03b8 e,k is the probability of drawing feature k given that the entity is e. The posterior probability of an entity given the context feature counts c and the mention phrase w is", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "p(e|c, w) \u221d p(e)p(w|e) k \u03b8 e,k c k .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": "3" |
|
}, |
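
{

"text": "As a concrete illustration (our own sketch, not code from the paper; all container names are ours), the posterior above can be scored in log space, dropping the multinomial coefficient since it does not depend on e:\n\nimport math\n\ndef nb_log_score(e, counts, prior, phrase_prob, theta, eps=1e-12):\n    # log p(e) + log p(w|e) + sum_k c_k * log theta[e][k]; the multinomial\n    # coefficient is constant across entities, so it is omitted\n    score = math.log(prior[e] + eps) + math.log(phrase_prob[e] + eps)\n    for k, c_k in counts.items():\n        score += c_k * math.log(theta[e].get(k, eps))\n    return score\n\ndef nb_resolve(candidates, counts, prior, phrase_prob, theta):\n    # argmax_e of Equation (1) over the candidate entities\n    return max(candidates, key=lambda e: nb_log_score(e, counts, prior, phrase_prob, theta))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Na\u00efve Bayes Model",

"sec_num": "3"

},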
|
{ |
|
"text": "At first sight, the na\u00efve Bayes model seems well suited for the entity resolution problem. It is very simple to implement. Given labeled data, the maximum likelihood (ML) estimate of the parameters \u03b8 e can be obtained from data counts. Unlabeled training data can be incorporated using the EM algorithm, which lends itself to easy parallelization. We implemented a na\u00efve Bayes model and used it for resolving entities in the CoNLL corpus (Hoffart et al., 2011) and the TAC KBP corpora (Ji et al., 2011; Mayfield et al., 2012) , as discussed in more detail in Section 9. We found that the performance of the model was only slightly better than using only the mention phrase. We hypothesize that for the more difficult cases in the test set, many context features of a mention are not informative in identifying the entity, contrary to the model assumption that all context features are drawn from an entityconditional distribution. Consider the following example:", |
|
"cite_spans": [ |
|
{ |
|
"start": 438, |
|
"end": 460, |
|
"text": "(Hoffart et al., 2011)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 485, |
|
"end": 502, |
|
"text": "(Ji et al., 2011;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 525, |
|
"text": "Mayfield et al., 2012)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "While Solon may have slipped slightly this year in Cleveland magazine's ranking of best suburbs, it climbed higher on a more prestigious list. On Monday, the city placed 23rd on Money magazine's annual list of best places to live in the United States.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "There are five US locations named Solon in Wikipedia (in addition to the pre-Socratic Athenian statesman). In the above, Solon refers to a suburb of Cleveland, Ohio. The only context feature that helps us discriminate between the different possible disambiguations is Cleveland; the remaining features (such as Money magazine, United States) could easily appear in the context of the other candidates. Thus, combining the evidence from all features may blur the distinction between these entities. Our selective context model aims to address this issue.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our selective context model assumes that most features that appear in the context of a mention are not discriminative for entity disambiguation. In particular, we make a simplifying modeling assumption that exactly one context feature is relevant for disambiguation, and the remaining features are drawn from a background distribution. Let K be the random variable corresponding to the index of the relevant feature for a mention of entity e. The model can be written as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selective context model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "p(k, e, w, b) =p(w)p(e|w)p(k|e) j p(b j |k) p(b j |k) = b j if k = j \u03b2 b j j (1 \u2212 \u03b2 j ) 1\u2212b j if k = j.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selective context model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Here \u03b2 j parameterizes the background probability of observing a feature that is not relevant. We impose the constraint that the relevant k th feature must be on, and hence p(b k |K = k) = b k . We treat mentions (or mention clusters) as independent and identically distributed; the complete model is shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 310, |
|
"end": 319, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Selective context model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Given a test mention (w , b ), we can compute the entity posterior by marginalizing out the latent variable k:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selective context model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(e|b , w ) \u221d p(e|w ) k p(k|e)b k j =k p(b j |K = j) \u221d p(e|w ) k b k p(k|e) \u03b2 k .", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Selective context model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Thus the entity posterior is a product of the name score (p(e|w )) and context score ( k b k p(k|e)/\u03b2 k ). This is intuitively appealing: if we assume that \u03b2 k \u2248 p(k) then p(k|e)/\u03b2 k is similar to the pointwise mutual information between feature k and entity e. 1 Thus the context score is the sum of the scores for all features present in the context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selective context model", |
|
"sec_num": "4" |
|
}, |
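
{

"text": "A minimal sketch of this scoring rule (ours, not the paper's code; the parameter containers are assumed to be plain dicts): the name score p(e | w\u2032) is multiplied by the summed per-feature PMI-like scores from Equation (2):\n\ndef selective_score(e, feats, tau_w, rho, beta):\n    # Equation (2): p(e | b', w') prop. to p(e | w') * sum_k b'_k p(k|e) / beta_k\n    name_score = tau_w.get(e, 0.0)  # p(e | w'), the mention prior\n    context_score = sum(rho[e].get(k, 0.0) / beta[k] for k in feats)\n    return name_score * context_score\n\ndef resolve(feats, tau_w, rho, beta):\n    # the candidates are exactly the entities with nonzero mention prior for w'\n    return max(tau_w, key=lambda e: selective_score(e, feats, tau_w, rho, beta))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Selective context model",

"sec_num": "4"

},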
|
{ |
|
"text": "Finally, it is important to note that our modeling goal here is not parameter sparsity, but rather capturing the sparsity of useful disambiguating features that occur in the context of each entity mention. In fact, the model parameters are not sparse in practice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Selective context model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We parameterize the model as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Inference", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 vectors \u03c4 w parameterize the conditional probability of an entity given the phrase w, with \u03c4 w,e = p(E = e|W = w)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Inference", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 vectors \u03c1 e parameterize the probability of relevant features for entity e, with \u03c1 e,k = p(K = k|E = e)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Inference", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 scalars \u03b2 j = p(B j = 1|K = j) parameterize the background feature distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Inference", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We estimate the maximum likelihood parameters from both labeled and unlabeled data. The latent variables in our model are the relevant feature indices k m for all mentions m, as well as the entities e m for the unlabeled mentions. We approximate the posterior distribution over latent variables as a product of marginals, and use the following auxiliary distributions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Inference", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 q m (e) is the probability that mention m resolves to entity e, set to the ground truth for the labeled mentions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Inference", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 s m (k) is the auxiliary distribution over the relevant feature index for mention m.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Inference", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We describe two approaches to estimating the parameters: (1) standard EM algorithm, where we infer all latent variables, and (2) a memory-efficient alternative. While both these approaches can be implemented using a distributed processing framework such as map-reduce (Dean and Ghemawat, 2008) , the latter where we only infer the missing entity labels scales better than the standard EM approach. Simulations on synthetic data suggest that the two algorithms have similar performance (see Section 5.3) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 293, |
|
"text": "(Dean and Ghemawat, 2008)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Inference", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The EM algorithm for the model performs coordinate ascent in the following lower bound on the likelihood of observed data:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Standard EM algorithm", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "L = m e q m (e) w [w m = w] ln \u03c4 e,w + k s m (k) ln(b m,k \u03c1 k,e ) \u2212 ln \u03b2 k + j b m,j ln \u03b2 j + (1 \u2212 b m,j ) ln(1 \u2212 \u03b2 j ) + H(q) + H(s) + const where [\u2022]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Standard EM algorithm", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "is the Iverson bracket, and H(\u2022) is the entropy function. It can be shown that the iterative updates are given by:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Standard EM algorithm", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "E-step: q m (e) \u221d \u03c4 w,e exp k s m (k) ln \u03c1 e,k /\u03b2 k s m (k) = b m,k \u03b2 k exp e q m (e) ln \u03c1 e,k M-step: \u03c4 w,e \u221d m q m (e)[w m = w] \u03c1 e,k \u221d m q m (e)s m (k) \u03b2 j = m (b m,j \u2212 s m (j)) m (1 \u2212 s m (j)) 5.2 Memory-efficient EM algorithm", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Standard EM algorithm", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "One practical drawback to using the full EM algorithm is that maintaining the auxiliary distributions q m (e) and s m (k) requires a very large amount of memory, since they scale with the data. In this section we propose a simpler memory-efficient alternative, where we only update q m (e). We perform the E-step according to the entity posterior equation (2). In the M-step, rather than updating parameters {\u03b2 j }, we use empirical marginals. To update parameters {\u03c1 e }, we approximate s m (k) by a fixed uniform distribution over features that fired, s m (j) = b m,j / k b m,k . The update for \u03c4 remains the same as before. The memory-efficient EM algorithm is: E-step:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Standard EM algorithm", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "q m (e) \u221d \u03c4 w,e k b m,k \u03c1 e,k \u03b2 k (3) M-step: \u03c4 w,e \u221d m q m (e)[w m = w] (4) \u03c1 e,k \u221d m q m (e)s m (k)", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Standard EM algorithm", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b2 j = 1 M m b m,j", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Standard EM algorithm", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "where M is the number of mentions. Note that these updates can be efficiently implemented in a mapreduce framework, where the map (E-step) computes the distribution q m (e), and the reduce (M-step) updates the parameters. This is the learning algorithm we use for all our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Standard EM algorithm", |
|
"sec_num": "5.1" |
|
}, |
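
{

"text": "One iteration of the memory-efficient updates (Equations 3-6) can be sketched as follows (our single-machine illustration of the map-reduce version described above; labeled mentions are clamped to their gold entity):\n\nfrom collections import defaultdict\n\ndef em_iteration(mentions, tau, rho, beta):\n    # mentions: list of (phrase w_m, set of active features b_m, gold entity or None)\n    new_tau = defaultdict(lambda: defaultdict(float))\n    new_rho = defaultdict(lambda: defaultdict(float))\n    for w, feats, gold in mentions:\n        if gold is not None:\n            q = {gold: 1.0}  # labeled mentions keep their label\n        else:\n            # E-step (Eq. 3): q_m(e) prop. to tau[w][e] * sum over active k of rho[e][k] / beta[k]\n            q = {e: t * sum(rho[e].get(k, 0.0) / beta[k] for k in feats)\n                 for e, t in tau[w].items()}\n            z = sum(q.values()) or 1.0\n            q = {e: v / z for e, v in q.items()}\n        # M-step accumulation (Eqs. 4-5), with s_m(k) fixed to the uniform\n        # distribution over the features that fired\n        s = 1.0 / len(feats) if feats else 0.0\n        for e, q_e in q.items():\n            new_tau[w][e] += q_e\n            for k in feats:\n                new_rho[e][k] += q_e * s\n    # Eq. 6: beta_j is the empirical marginal of feature j, computed once from\n    # the data; normalization of new_tau over e and new_rho over k is omitted\n    return new_tau, new_rho",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Memory-efficient EM algorithm",

"sec_num": "5.2"

},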
|
{ |
|
"text": "We compared the performance of the two selective context EM algorithms (standard EM and its memory efficient variant) and the EM algorithm for na\u00efve Bayes on synthetic data generated from our model. We left out the mention prior and only evaluated the context part. We assumed that there are |E| = 10 equiprobable entities, M = 2, 000 mentions, and |F| = 200 possible features. Each entity was assigned a set of 5-10 randomly selected relevant features (these feature sets were allowed to overlap). For each mention, we drew one relevant feature according to \u03c1 e , and a number of other features according to {\u03b2 j }. We sampled parameters {\u03c1 e } from a symmetric Dirichlet distribution, and parameters {\u03b2 j } from a uniform distribution in [0, \u03c3], where \u03c3 roughly controlled the number of noisy features introduced. We generated synthetic datasets with \u03c3 \u2208 {0.02, 0.03, 0.05, 0.07, 0.1, 0.15, 0.22, 0.33}. We then removed labels from half of the mentions and ran the three inference algorithms. To compare the results, we computed micro-averaged precision and recall over the unlabeled mentions, since these are the metrics we are ultimately interested in. The results are shown in Fig. 2 , where each curve corresponds to a dataset. It is evident that the performance na\u00efve Bayes is hindered by spurious features, even though each mention gets at least Figure 2 : Micro-averaged precision-recall curves for the two EM algorithms for the selective context model (see Section 5), and the EM algorithm for the na\u00efve Bayes model. Each color corresponds to a dataset with a different level of noise. It can be seen that in comparison to the na\u00efve Bayes model, the selective context models are more resilient to spurious features. In addition the two learning approaches -standard EM and its memory efficient variant have similar performance. one discriminative feature. The two selective context EM algorithms are more robust to noise; this is as expected, since they are based on the true datagenerating model. Note that the performance of the memory-efficient version is similar to that of the standard EM algorithm.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1182, |
|
"end": 1188, |
|
"text": "Fig. 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1354, |
|
"end": 1362, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Simulation", |
|
"sec_num": "5.3" |
|
}, |
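
{

"text": "The generator can be sketched as follows (our reconstruction of the description above, using numpy; usage: b, entities, rho, beta = make_dataset(np.random.default_rng(0))):\n\nimport numpy as np\n\ndef make_dataset(rng, n_entities=10, n_mentions=2000, n_feats=200, sigma=0.1):\n    # each entity gets 5-10 overlapping relevant features with Dirichlet weights\n    rho = np.zeros((n_entities, n_feats))\n    for e in range(n_entities):\n        rel = rng.choice(n_feats, size=rng.integers(5, 11), replace=False)\n        rho[e, rel] = rng.dirichlet(np.ones(len(rel)))\n    beta = rng.uniform(0.0, sigma, size=n_feats)  # background noise rates\n    entities = rng.integers(0, n_entities, size=n_mentions)  # equiprobable\n    b = rng.random((n_mentions, n_feats)) < beta  # background features\n    for m, e in enumerate(entities):\n        k = rng.choice(n_feats, p=rho[e])  # one relevant feature per mention\n        b[m, k] = True\n    return b, entities, rho, beta",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Simulation",

"sec_num": "5.3"

},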
|
{ |
|
"text": "Semi-supervised learning has the important benefit of including many informative context features that are not present in the contexts of labeled training mentions. However, this comes at the cost of a very large model that in some cases may not fit in a single computer's memory. Fortunately, our inference algorithm is easily parallelizable. We partition the model across multiple servers in a distributed clientserver architecture. This does not change any of the model parameters but rather partitions them so that they can be loaded into memory on multiple servers. Each entity is assigned to a server according to a heuristic algorithm described below, and model parameters for the entity are stored on the assigned server.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parallel Implementation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Clients process documents to find mentions (if they have not been provided), their contexts, and the context feature vectors for each mention. This process does not require access to the model. Each client stores a lookup table that maps mention phrases to the servers containing entities for which the mention phrase probability is nonzero. To re-solve a mention, the lookup table is consulted to identify the servers that could resolve the mention. All mentions in a document (or a suitable batch of documents) can be sent in parallel to the selected servers. Thus, the time to resolve all mentions in a document is proportional to the maximum response time of an entity server, rather than to the sum of per-mention response times. Further, queries to the same entity server are batched together.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parallel Implementation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Once an entity server receives a query for a mention m (consisting of the phrase w m and context features b m ), it looks up the candidate entities for w m , retrieves model parameters, and returns the entities e 1 , . . . , e n with the n highest p(e|b m , w m ) (Equation 2). These lists are then merged across all the servers responsive to w m to yield the n top-scoring entities for m.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parallel Implementation", |
|
"sec_num": "5.4" |
|
}, |
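
{

"text": "The client-side scatter-gather can be sketched as follows (ours; query_server stands in for whatever RPC the serving system exposes and is purely hypothetical):\n\nimport heapq\nfrom concurrent.futures import ThreadPoolExecutor\n\ndef resolve_mention(phrase, feats, shard_lookup, query_server, n=5):\n    # shard_lookup: phrase -> servers holding entities with nonzero prior for it\n    # query_server(server, phrase, feats, n) -> list of (score, entity) pairs\n    servers = shard_lookup.get(phrase, [])\n    if not servers:\n        return []\n    with ThreadPoolExecutor(max_workers=len(servers)) as pool:\n        parts = pool.map(lambda s: query_server(s, phrase, feats, n), servers)\n    # merge the per-server top-n lists into a single global top-n\n    return heapq.nlargest(n, (pair for part in parts for pair in part))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parallel Implementation",

"sec_num": "5.4"

},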
|
{ |
|
"text": "We assign entities to servers by creating a bipartite entity-name graph and applying a greedy graph clustering algorithm that penalizes large clusters as they have a negative impact on load-balancing, while at the same time ensuring that most names will only be in one or a few clusters. Each cluster is then assigned to a server. Phrases such as Washington that evoke many entities may be distributed across multiple servers. Plato can also be made more responsive and resilient to server failure by replicating entity models across multiple servers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parallel Implementation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Entity resolution is a key step in many language-processing tasks such as text classification (Gabrilovich and Markovitch, 2007) , information extraction (Lin et al., 2012) and grounded semantic parsing (Kwiatkowski et al., 2011) . It can also help upstream tasks such as part-of-speech tagging, parsing, and coreference resolution as it provides a link to world knowledge such as entity types, aliases, and gender.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 128, |
|
"text": "(Gabrilovich and Markovitch, 2007)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 154, |
|
"end": 172, |
|
"text": "(Lin et al., 2012)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 229, |
|
"text": "(Kwiatkowski et al., 2011)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Early work (Bunescu and Pasca, 2006; Cucerzan, 2007; Mihalcea and Csomai, 2007; Milne and Witten, 2008; Nguyen and Cao, 2008) on the entity resolution problem focused on linking named entities to the relevant Wikipedia pages (also known as Wikification). Most of these systems resolved mentions by defining a similarity score between mention contexts and Wikipedia page contents. Mihalcea and Csomai (2007) and used na\u00efve Bayes models similar to our baseline. Dredze et al. 2010and Ratinov et al. (2011) used a ranking support vector machine (SVM), trained to put the correct entity above the remaining ones. More recently He et al. (2013) used stacked autoencoders to learn a context score.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 36, |
|
"text": "(Bunescu and Pasca, 2006;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 37, |
|
"end": 52, |
|
"text": "Cucerzan, 2007;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 53, |
|
"end": 79, |
|
"text": "Mihalcea and Csomai, 2007;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 80, |
|
"end": 103, |
|
"text": "Milne and Witten, 2008;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 104, |
|
"end": 125, |
|
"text": "Nguyen and Cao, 2008)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 406, |
|
"text": "Mihalcea and Csomai (2007)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 503, |
|
"text": "Ratinov et al. (2011)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "While Bunescu and Pasca (2006) and Mihalcea and Csomai (2007) used local models only, others (Cucerzan, 2007; Milne and Witten, 2008; Kulkarni et al., 2009; Ferragina and Scaiella, 2010; Han and Zhao, 2009; Ratinov et al., 2011; Hoffart et al., 2011; He et al., 2013; Alhelbawy and Gaizauskas, 2014; Pershina et al., 2015 ) used a coherency model in conjunction with a local model. One popular approach to coherency has been to use variants of the PageRank algorithm (Page et al., 1999) to re-score candidates (He et al., 2013; Alhelbawy and Gaizauskas, 2014; Pershina et al., 2015) . Pershina et al. (2015) achieve the highest accuracy to date on the CoNLL 2003 dataset using a version of Personalized PageRank. Chisholm and Hachey (2015) demonstrate the benefits of supplementing curated Wikipedia data with a large noisy Web corpus. Their model, relying on simple distance scores and an SVM ranker, achieves highly competitive results when trained on both Wikipedia and the Wikilinks corpus (Orr et al., 2013) . While all of the systems described thus far rely on supervised learning, Plato is semisupervised, and our experimental results (Section 9) confirm that incorporating unlabeled Web data can lead to significant performance gains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 30, |
|
"text": "Bunescu and Pasca (2006)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 35, |
|
"end": 61, |
|
"text": "Mihalcea and Csomai (2007)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 109, |
|
"text": "(Cucerzan, 2007;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 133, |
|
"text": "Milne and Witten, 2008;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 156, |
|
"text": "Kulkarni et al., 2009;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 186, |
|
"text": "Ferragina and Scaiella, 2010;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 206, |
|
"text": "Han and Zhao, 2009;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 207, |
|
"end": 228, |
|
"text": "Ratinov et al., 2011;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 250, |
|
"text": "Hoffart et al., 2011;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 267, |
|
"text": "He et al., 2013;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 299, |
|
"text": "Alhelbawy and Gaizauskas, 2014;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 321, |
|
"text": "Pershina et al., 2015", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 486, |
|
"text": "(Page et al., 1999)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 510, |
|
"end": 527, |
|
"text": "(He et al., 2013;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 559, |
|
"text": "Alhelbawy and Gaizauskas, 2014;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 560, |
|
"end": 582, |
|
"text": "Pershina et al., 2015)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 607, |
|
"text": "Pershina et al. (2015)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 739, |
|
"text": "Chisholm and Hachey (2015)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 994, |
|
"end": 1012, |
|
"text": "(Orr et al., 2013)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Several recent entity resolution systems (Kataria et al., 2011; Han and Sun, 2012; Houlsby and Ciaramita, 2014) have been based on topic modeling. Han and Sun (2012) associate each document with a single topic; the topic generates entities, and the entities generate context words (non-referrent phrases). Houlsby and Ciaramita (2014) consider each entity to be a topic, and the words generated by the entity include both non-referent and referent phrases in the document, similarly to our context features. While topic modeling approaches are capable of exploiting both labeled and unlabeled data, inference, typically based on sampling, can be extremely slow when the number of entities is very large. In contrast, our inference algorithm is simple to implement, parallelizable, scalable, and easily extended to the semi-supervised setting. Jin et al. (2014) argue that many textual features within Web documents are irrelevant to the entity resolution task. Their method iteratively selects the most discriminative features for each document by trying to minimize the distance to some entity in the KB, and it outperforms a na\u00efve Bayes model that includes all features. In contrast, we incorporate assumptions about feature relevance into our probabilistic model, and do not require access to the KB during learning or inference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 165, |
|
"text": "Han and Sun (2012)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 843, |
|
"end": 860, |
|
"text": "Jin et al. (2014)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Knowledge Base We use Freebase 2 (Bollacker et al., 2008) as our KB. Freebase data is harvested from many sources, including Wikipedia, AMG, and IMDB. As of this writing, it contains more than 21 million topics and 70 million properties. For a large majority of topics that appear both in Freebase and Wikipedia (Freebase covers more than 95% of Wikipedia), Freebase maintains a link to the Wikipedia page of that topic. While it is feasible to train our models using all of Freebase, for the experiments in this paper we restrict ourselves to the set of entities that appear in both Freebase and Wikipedia, as this is the standard setup used for our evaluation corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 57, |
|
"text": "(Bollacker et al., 2008)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We use all pages in Wikipedia that contain a corresponding topic in Freebase as labeled data. For a given Wikipedia page, we treat the target Wikipedia page of the link as the entity, and the anchor text as a mention of that entity (Milne and Witten, 2008; . We ignore disambiguation pages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 232, |
|
"end": 256, |
|
"text": "(Milne and Witten, 2008;", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Labeled Data", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Unlabeled Data We collected a Web corpus of 50 million pages from a source similar to the Clueweb09 corpus (Hallan and Hoy, 2009) for use as unlabeled data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 129, |
|
"text": "(Hallan and Hoy, 2009)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Labeled Data", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used three evaluation corpora: (a) CoNLL 2003 3 (Hoffart et al., 2011) , (b) TAC 2011 KBP (Ji et al., 2011) , and (c) TAC 2012 KBP (Mayfield et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 73, |
|
"text": "(Hoffart et al., 2011)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 110, |
|
"text": "(Ji et al., 2011)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 157, |
|
"text": "(Mayfield et al., 2012)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Data and Setup", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The CoNLL dataset contains 1,393 articles with about 34K mentions (Hachey et al., 2013) . For the purpose of comparison with previous work, we evaluate Plato on the 231 test-b documents with 4,483 linkable gold mentions. Performance on those mentions is measured by micro-averaged precision@1, that is, accuracy averaged across mentions. We did not use CoNLL train or test-a documents for training or parameter tuning.", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 87, |
|
"text": "(Hachey et al., 2013)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Data and Setup", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The TAC KBP competitions use a subset of a 2008 Wikipedia dump as the reference knowledge base. TAC 2012 evaluation data contains 2,226 gold mentions, of which 1,177 are linkable to the reference KB, and TAC 2011 data contains 2,250 mentions of which 1,123 are linkable to the same KB. Thus the TAC KBP competition evaluation includes NIL entities; participants are required to cluster NIL mentions across documents so that all mentions of each unknown entity are assigned a unique identifier. In addition to the official evaluation metric B 3+ F 1 , we also report Plato's in-KB accuracy as well as overall accuracy, where all NIL mentions are considered to be in one cluster. Once again as in the case of CoNLL, we did not train or tune Plato on any TAC KBP training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Data and Setup", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For each of the test corpora, we evaluate on the exact same set of mentions as previous work that we compare against in Tables 2 and 3 . However, our setup differs from existing systems in two important ways. First, we train on Wikipedia and unlabeled Web data only, and do not use TAC or CoNLL training or development datasets. Second, candidate generation for each mention is based on our estimated mention prior (see Section 8 for details) and thus may differ from previous approaches. Plato's candidate recall is shown on Table 1 ; this is an upper bound on the accuracy of our approach.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 134, |
|
"text": "Tables 2 and 3", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 533, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Data and Setup", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Mention Prior We initialized the mention phrase parameters {\u03c4 e } from links in Wikipedia by counting how many times a given phrase w is used to refer to entity e, and normalizing appropriately (Han and Sun, 2012) . We used the following sources to obtain (w, e) pairs for the above estimates: (a) w is the title of e's page after removing parenthesized disambiguation terms; (b) w is the title of a Wikipedia Dataset Candidate recall CoNLL 2003 91.7 TAC 2011 84.8 TAC 2012 83.2 Table 1 : Candidate generation recall on the three evaluation datasets: the percentage of linkable gold mentions for which the correct entity was in the set of candidates generated by Plato. This is an upper bound on our in-KB accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 194, |
|
"end": 213, |
|
"text": "(Han and Sun, 2012)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 410, |
|
"end": 481, |
|
"text": "Dataset Candidate recall CoNLL 2003 91.7 TAC 2011 84.8 TAC 2012", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 487, |
|
"end": 494, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "redirect page linking to e's page; (c) w is a Freebase alias (property /common/topic/alias) for Freebase topic e; (d) w is the title of a disambiguation page that links to e as a possible disambiguation. For all the aliases obtained from the above sources, we used the Wikilinks corpus (Orr et al., 2013) as an additional source of counts. In addition to the above sources, we also used anchors of Wikipedia pages as aliases if they occurred more than 500 times. Unlabeled data was used to reestimate the parameters \u03c4 e using Equation 4; however, we did not introduce any new aliases.", |
|
"cite_spans": [ |
|
{ |
|
"start": 286, |
|
"end": 304, |
|
"text": "(Orr et al., 2013)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
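
{

"text": "The alias-count estimate of the mention prior described above amounts to the following (our sketch; the (w, e) pairs would come from the Wikipedia and Freebase sources just listed):\n\nfrom collections import Counter, defaultdict\n\ndef estimate_mention_prior(pairs):\n    # pairs: iterable of (phrase w, entity e) alias observations\n    counts = defaultdict(Counter)\n    for w, e in pairs:\n        counts[w][e] += 1\n    tau = {}\n    for w, ents in counts.items():\n        total = sum(ents.values())\n        # tau[w][e] = p(e | w): per-phrase normalized alias counts\n        tau[w] = {e: c / total for e, c in ents.items()}\n    return tau",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setup",

"sec_num": "8"

},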
|
{ |
|
"text": "Context Features To extract context features, all documents were processed as follows. The free text in each document was POS-tagged and dependencyparsed using a parser that is a reimplementation of the MaltParser (Nivre et al., 2007) with a linear kernel SVM. When trained on Sections 02-21 of the Wall Street Journal (WSJ) portion of the Penn Treebank (Marcus et al., 1993) , our parser achieves an unlabeled attachment score (UAS) of 88.24 and a labeled attachment score (LAS) of 84.69 on WSJ Section 22. Named mentions (such as Barack Obama) and common noun phrases (such as the president) were identified using a simple rulebased tagger with rules over POS tag sequences and dependency parses. We then used a withindocument coreference resolver comparable to that of Haghighi and Klein (2009) to cluster all identified mentions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 234, |
|
"text": "(Nivre et al., 2007)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 354, |
|
"end": 375, |
|
"text": "(Marcus et al., 1993)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 772, |
|
"end": 797, |
|
"text": "Haghighi and Klein (2009)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "As context features in our model, we used the phrases of all mentions in each coreference cluster in the document. We did not differentiate between phrases corresponding to the same coreference cluster as the query string, and phrases in other clusters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Adding other types of local features, such as words and phrases near the mentions in a cluster and dependency paths involving mentions in a cluster, did not lead to significant performance improvements in either our proposed model or na\u00efve Bayes, and so we did not include them. We initialized the context parameters {\u03c1 k } using only labeled data, and re-estimated them using unlabeled data, as detailed in Equation 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Inference To determine the set of candidate entities for each coreference cluster, we use the mention with the longest phrase in the cluster. This phrase is used to generate the candidates which are then scored using Equation 2. We copy the label of the highest-scoring entity to all mentions in the cluster. Note that clusters will often include proper names mentions, referential common noun phrases, and referential pronouns. Thus Plato detects and links both nominal and named mentions. However, following most existing work, we only evaluate the resolution of gold named entity mentions to either KB or NIL. While in the case of CoNLL all gold mentions can be resolved to the KB, in TAC NIL is a valid label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "NIL Detection and Clustering As noted earlier, not all mentions correspond to an entity in the KB, even if they share a name or alias with a known entity. The problem is further complicated by the fact that it is hard to estimate the total number of entities in the world with a particular name. Plato decides whether to resolve a mention m (i.e., the cluster that m is a member off) to an entity in KB or to NIL based on context alone. Let", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "a * m = max e k b m,k \u03c1 e,k \u03b2 k .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "If a * m < \u03b1, we resolve i to NIL. We found that setting \u03b1 = 1e \u22125 works well in practice. We use the above rule both during EM-based learning and at inference time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
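
{

"text": "The NIL rule is a thin threshold on top of the context score (our sketch, with \u03b1 = 10^{\u22125} as stated above):\n\nALPHA = 1e-5\n\ndef best_context_score(feats, candidates, rho, beta):\n    # a*_m = max_e sum_k b_mk * rho[e][k] / beta[k]\n    return max((sum(rho[e].get(k, 0.0) / beta[k] for k in feats)\n                for e in candidates), default=0.0)\n\ndef is_nil(feats, candidates, rho, beta, alpha=ALPHA):\n    # resolve to NIL when no candidate gets enough context support;\n    # applied both during EM-based learning and at inference time\n    return best_context_score(feats, candidates, rho, beta) < alpha",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setup",

"sec_num": "8"

},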
|
{ |
|
"text": "The TAC KBP evaluation requires participants to perform cross-document clustering of NIL mentions, such that each unknown entity has a distinct NIL id. We employ a very simple clustering strategy, similar to Cucerzan (2012):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "\u2022 Since our KB is much bigger than 2008", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
|
{ |
|
"text": "CoNLL 2003 test-b micro-accuracy Hoffart et al. (2013) 82.5 Sil and Yates (2013) 84.2 Nguyen et al. 201484.8 Houlsby and Ciaramita (2014) 84.9 He et al. (2013) + 85.6 Chisholm and Hachey (2015) to entities that are in our KB but not in the TAC KB. We assign the same NIL label to all such mentions of the same entity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 54, |
|
"text": "Hoffart et al. (2013)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 60, |
|
"end": 80, |
|
"text": "Sil and Yates (2013)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 137, |
|
"text": "Houlsby and Ciaramita (2014)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 193, |
|
"text": "Chisholm and Hachey (2015)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 We assign a unique NIL identifier to each coreference cluster that is resolved to NIL by Plato.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We estimated the parameters of the na\u00efve Bayes model from labeled data only, using a symmetric Dirichlet prior to prevent zero probabilities. While the na\u00efve Bayes model can also be extended to include unlabeled data using the EM algorithm (Nigam et al., 2000) , we did not pursue this beyond preliminary experiments, since the initial results were mediocre and re-estimation on unlabeled data is known to be very sensitive to initialization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 260, |
|
"text": "(Nigam et al., 2000)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Na\u00efve Bayes Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Table 2 summarizes entity resolution results on the CoNLL 2003 corpus. This evaluation only considers linkable mentions, and we compare different systems in terms of mention-averaged accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "As external baselines for CoNLL test-b, we show the results of (Nguyen et al., 2014; Sil and Yates, 2013; Houlsby and Ciaramita, 2014; He et al., 2013; Chisholm and Hachey, 2015) . To the best of our knowledge, these are the top reported results for this dataset. We also note that the systems of Alhelbawy and Gaizauskas (2014) and Pershina et al. (2015) are highly competitive on CoNLL; however we do not include their results in Table 2 due to differences to the standard evaluation settings (while we evaluate on test-b as is standard practice they evaluate on the entire dataset). Table 3 shows the evaluation results on the two TAC KBP corpora; these evaluations also consider NIL entities. Our baseline for TAC 2012 is the system of Cucerzan (2012), which achieved the highest accuracy and B 3+ F 1 score in the 2012 competition. Our baseline for TAC 2011 is Cucerzan (2011), which achieved the highest overall accuracy and second-highest B 3+ F 1 score in the 2011 competition. Dalton and Dietz (2013) also report high accuracy on TAC KBP data; however, their results are computed on non-standard training/evaluation data splits, and hence not directly comparable.", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 84, |
|
"text": "(Nguyen et al., 2014;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 85, |
|
"end": 105, |
|
"text": "Sil and Yates, 2013;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 106, |
|
"end": 134, |
|
"text": "Houlsby and Ciaramita, 2014;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 135, |
|
"end": 151, |
|
"text": "He et al., 2013;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 152, |
|
"end": 178, |
|
"text": "Chisholm and Hachey, 2015)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 297, |
|
"end": 328, |
|
"text": "Alhelbawy and Gaizauskas (2014)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 355, |
|
"text": "Pershina et al. (2015)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 986, |
|
"end": 1009, |
|
"text": "Dalton and Dietz (2013)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 432, |
|
"end": 439, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 586, |
|
"end": 593, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "In both tables, we include the results of all of our experiments: (a) mention prior baseline, (b) supervised na\u00efve Bayes (see Section 3 for details); (c) supervised selective context model; and (d) Plato, the semi-supervised selective context model. The mention prior alone does surprisingly well on this task, but well below the previous best results, as might be expected. Supervised na\u00efve Bayes performs better, but does not offer much improvement over the mention prior. The supervised selective context model performs much better than na\u00efve Bayes, even though it is trained on exactly the same data. These results support our hypothesis that the performance of na\u00efve Bayes suffers in the presence of irrelevant features. Finally, Plato outperforms the", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "In-KB Table 3 : TAC KBP evaluation results for our model and previous highest-accuracy systems. The best results are shown in bold-face; this includes the highest-accuracy system and systems whose performance was not statistically significantly different, according to a two-tailed t-test with p = 0.05.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 13, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "other models in accuracy by a substantial margin, suggesting that incorporating unlabeled data helps the model generalize through feature discovery.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In comparison with previous work, Plato is highly competitive with existing results on all three datasets. On TAC 2012 data, Plato achieves the highest in-KB accuracy, and the same overall accuracy as the previous best system (Cucerzan, 2012). On TAC 2011 data, Plato once again achieves the highest in-KB accuracy, and has similar overall accuracy as the best system of (Cucerzan, 2011) (86.5 compared to 86.8).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The highest reported accuracy on CoNLL (testb) is obtained by Chisholm and Hachey (2015) , who use both Wikipedia and Wiklinks to train their model. Their results support the case for incorporating large amounts of noisy training data into entity linking systems. Without coherency, their model performs slightly worse than Plato, suggesting that coherency is a good direction for improving Plato. Recently, Pershina et al. (2015) have reported accuracy 91.8 on the entire CoNLL dataset (train, testa, test-b) using a variant of Personalized PageRank. This is higher than Plato candidate recall (upper bound on our accuracy); as their publication is very recent, we have not had the chance to evaluate Plato on their candidates. Current Plato accuracy on the whole CoNLL dataset is 86.5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 88, |
|
"text": "Chisholm and Hachey (2015)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 408, |
|
"end": 430, |
|
"text": "Pershina et al. (2015)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Since many of the systems described here were trained on different datasets and features, it is hard to provide a deeper comparison of their properties. However, we reiterate that Plato's performance is out-of-the-box: it was not trained on CoNLL or TAC data, and we used the exact same model for all evaluations in Tables 2 and 3 Finally, we illustrate the favorable feature discovery properties of the semi-supervised approach with the following example:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 330, |
|
"text": "Tables 2 and 3", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "George Harrison said that partnering with Roush Fenway Racing further demonstrates how Wii is bringing gaming to the masses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here the mention George Harrison refers to the former senior vice-president (SVP) of Nintendo, and not the Beatle George Harrison (Freebase id /m/03bnv).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The mention prior has a strong preference for the Beatles: p(Beatles|George Harrison) = 0.92 while p(SVP|George Harrison) = 0.02. In addition, none of the sentence context features occur in Wikipedia. Since the supervised model is trained only on Wikipedia, the mention prior component dominates, and the system incorrectly infers that George Harrison refers to the Beatle. However, once we retrain with unlabeled data, the model learns several new relevant features for the correct entity, including Rouse Fenway Racing and Wii, and gaming. As a result we now get p(SVP|George Harrison, b) = 0.74 p(Beatles|George Harrison, b) = 0.25, which leads us to the correct inference for the person mentioned in the passage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
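{

"text": "To make the feature discovery effect concrete, the following is a minimal, hypothetical sketch (ours, not the production implementation) of how the mention prior combines with context features in a selective context model: since exactly one feature is drawn from the entity's distribution and the rest from the background, the posterior is proportional to p(e|w_m) sum_k p(b_k|e)/p(b_k|\u03b2_k). All probability values below are invented for illustration.\n\ndef posterior(prior, ent_feat, bg_feat, features):\n    # p(e | m, b) up to normalization: prior(e) * sum_k p(b_k|e) / p(b_k|background).\n    scores = {}\n    for e, p_e in prior.items():\n        lik = sum(ent_feat[e].get(b, 1e-9) / bg_feat.get(b, 1e-9) for b in features)\n        scores[e] = p_e * lik\n    z = sum(scores.values())\n    return {e: s / z for e, s in scores.items()}\n\n# Mention prior from the running example; the per-entity and background\n# feature probabilities below are made up.\nprior = {'Beatle': 0.92, 'SVP': 0.02}\nent_feat = {'Beatle': {}, 'SVP': {'Wii': 0.04, 'Roush Fenway Racing': 0.02, 'gaming': 0.03}}\nbg_feat = {'Wii': 0.001, 'Roush Fenway Racing': 0.0005, 'gaming': 0.002}\nprint(posterior(prior, ent_feat, bg_feat, ['Wii', 'Roush Fenway Racing', 'gaming']))\n# With these invented numbers, the discovered features let the SVP entity\n# overcome the much larger Beatle prior, mirroring the example above.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model",

"sec_num": null

},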
|
{ |
|
"text": "We have presented Plato, a simple and scalable entity resolution system that leverages unlabeled data to produce state-of-the-art results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future work", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "The main gains in our approach come from combining a novel selective context model with a large corpus of unlabeled Web documents. We have demonstrated that a model in which most features are considered noisy is superior to a model in which all features depend on the entity. However, in some circumstances such a model may fail to exploit all useful features. An obvious direction for future work is extending the framework to cases where a small subset of features can be relevant, for example using binary per-feature indicator variables. A more subtle direction for context modeling could involve distinguishing between salient entities, for which most features (mentions in that cluster) in a document are likely to be informative, and non-salient entities with few informative features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future work", |
|
"sec_num": "10" |
|
}, |
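{

"text": "As a sketch of what such an extension might look like (our illustration under assumed distributions, not a design from this paper), each context feature could carry a binary Bernoulli relevance indicator, so that the per-feature likelihood mixes the entity and background distributions and any subset of features can be relevant:\n\nimport math\n\ndef feature_loglik(b, ent_feat_e, bg_feat, rho=0.1):\n    # z_b ~ Bernoulli(rho) marks feature b as relevant; marginalizing z_b mixes\n    # the entity distribution p(b|e) with the background distribution.\n    return math.log(rho * ent_feat_e.get(b, 1e-9) + (1 - rho) * bg_feat.get(b, 1e-9))\n\n# The document log-likelihood for entity e sums feature_loglik over all context\n# features, so several features can fire for e rather than exactly one.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Conclusions and Future work",

"sec_num": "10"

},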
|
{ |
|
"text": "Plato does not include a cross-entity coherency model; while such models are intuitively appealing, they depend on cross-entity links that are often missing for rare entities, and may require computationally demanding joint inference in a probabilistic model. We capture discourse coherency only by adding referring phrases in the document to the context features of each mention cluster (as strings, not entities). Very recent results by Chisholm and Hachey (2015) and Pershina et al. (2015) suggest that simple coherence-based rescoring can significantly boost performance, and so this is another potential direction for improving Plato.", |
|
"cite_spans": [ |
|
{ |
|
"start": 439, |
|
"end": 465, |
|
"text": "Chisholm and Hachey (2015)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 492, |
|
"text": "Pershina et al. (2015)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future work", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "While semi-supervised training leads to major accuracy gains for our method, it also creates very large models. We are able to serve those models with a simple distributed architecture, but it would be worth exploring inference methods that could reduce model size while not compromising accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future work", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "One possibility involves improving inference to select a small set of relevant features for each mention, rather than averaging over all features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future work", |
|
"sec_num": "10" |
|
}, |
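{

"text": "One rough heuristic along these lines (a sketch under assumed per-entity and background feature distributions, not part of Plato) would rank a mention's context features by the ratio p(b|e)/p(b|\u03b2), which is monotone in the model's selection responsibility, and keep only the top k at serving time:\n\ndef select_relevant(features, ent_feat_e, bg_feat, k=3):\n    # Keep the k features that the entity explains far better than the background.\n    ranked = sorted(features, key=lambda b: ent_feat_e.get(b, 1e-9) / bg_feat.get(b, 1e-9), reverse=True)\n    return ranked[:k]\n\n# Example with invented distributions:\n# select_relevant(['Wii', 'gaming', 'said'], {'Wii': 0.04, 'gaming': 0.03},\n#                 {'Wii': 0.001, 'gaming': 0.002, 'said': 0.05}, k=2) -> ['Wii', 'gaming']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Conclusions and Future work",

"sec_num": "10"

},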
|
{ |
|
"text": "Pointwise mutual information is defined as pmi(x; y) = log p(y|x)/p(y).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
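{

"text": "As a quick numeric check of this definition (all counts invented), pmi can be computed directly from co-occurrence statistics:\n\nimport math\n\ndef pmi(c_xy, c_x, c_y, n):\n    # pmi(x; y) = log p(y|x)/p(y), with p(y|x) = c_xy/c_x and p(y) = c_y/n.\n    return math.log((c_xy / c_x) / (c_y / n))\n\nprint(pmi(c_xy=50, c_x=200, c_y=1000, n=100000))  # log(0.25/0.01) = log 25, about 3.22",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "",

"sec_num": null

},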
|
{ |
|
"text": "www.freebase.com. 3 www.mpi-inf.mpg.de/yago-naga/aida.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Graph ranking for collective named entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Ayman", |
|
"middle": [], |
|
"last": "Alhelbawy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Gaizauskas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "75--80", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ayman Alhelbawy and Robert Gaizauskas. 2014. Graph ranking for collective named entity disambiguation. In Proc. 52nd Annual Meeting of the Association for Computational Linguistics, ACL 14, pages 75-80.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Freebase: a collaboratively created graph database for structuring human knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Kurt", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Bollacker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Evans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Praveen", |
|
"middle": [], |
|
"last": "Paritosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Sturge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Taylor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of the 2008 ACM SIGMOD International Conference on Management of Data", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1247--1250", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kurt D. Bollacker, Colin Evans, Praveen Paritosh, Tim Sturge, and Jamie Taylor. 2008. Freebase: a collabo- ratively created graph database for structuring human knowledge. In Proc. of the 2008 ACM SIGMOD Inter- national Conference on Management of Data, pages 1247-1250. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Using encyclopedic knowledge for named entity disambiguation", |
|
"authors": [ |
|
{

"first": "Razvan",

"middle": [

"C"

],

"last": "Bunescu",

"suffix": ""

},

{

"first": "Marius",

"middle": [],

"last": "Pasca",

"suffix": ""

}
|
], |
|
"year": 2006, |
|
"venue": "Proc. 11th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Razvan C. Bunescu and Marius Pasca. 2006. Using en- cyclopedic knowledge for named entity disambigua- tion. In Proc. 11th Conference of the European Chap- ter of the Association for Computational Linguistics, EACL 06.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Entity disambiguation with web links", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Chisholm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hachey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "145--156", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Chisholm and Ben Hachey. 2015. Entity disam- biguation with web links. Transactions of the Associ- ation for Computational Linguistics, 3:145-156.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Large-scale named entity disambiguation based on Wikipedia data", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Silviu Cucerzan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "708--716", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Silviu Cucerzan. 2007. Large-scale named entity dis- ambiguation based on Wikipedia data. In Proc. of EMNLP-CoNLL 2007, pages 708-716.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "TAC entity linking by performing full-document entity extraction and disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Silviu Cucerzan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the Text Analysis Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Silviu Cucerzan. 2011. TAC entity linking by perform- ing full-document entity extraction and disambigua- tion. In In Proc. of the Text Analysis Conference, TAC 11.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The MSR system for entity linking at TAC 2012", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Silviu Cucerzan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of the Text Analysis Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Silviu Cucerzan. 2012. The MSR system for entity link- ing at TAC 2012. In In Proc. of the Text Analysis Con- ference, TAC 12.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A neighborhood relevance model for entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dalton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Dietz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of the 10th Conference on Open Research Areas in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Dalton and Laura Dietz. 2013. A neighborhood relevance model for entity linking. In Proc. of the 10th Conference on Open Research Areas in Information Retrieval.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "MapReduce: simplified data processing on large clusters", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjay", |
|
"middle": [], |
|
"last": "Ghemawat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of the 23rd International Conference on Computational Linguistics", |
|
"volume": "51", |
|
"issue": "", |
|
"pages": "277--285", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Dean and Sanjay Ghemawat. 2008. MapReduce: simplified data processing on large clusters. Commu- nications of the ACM, 51(1):107-113, January. Mark Dredze, Paul McNamee, Delip Rao, Adam Ger- ber, and Tim Finin. 2010. Entity disambiguation for knowledge base population. In Proc. of the 23rd In- ternational Conference on Computational Linguistics, COLING 10, pages 277-285.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "TAGME: on-the-fly annotation of short text fragments (by Wikipedia entities)", |
|
"authors": [ |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Ferragina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ugo", |
|
"middle": [], |
|
"last": "Scaiella", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of the 19th ACM International Conference on Information Knowledge and Management, CIKM 10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1625--1628", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paolo Ferragina and Ugo Scaiella. 2010. TAGME: on-the-fly annotation of short text fragments (by Wikipedia entities). In Proc. of the 19th ACM Inter- national Conference on Information Knowledge and Management, CIKM 10, pages 1625-1628. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Using Wikitology for cross-document entity coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Finin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zareen", |
|
"middle": [], |
|
"last": "Syed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Mayfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Mc-Namee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Piatko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of the AAAI Spring Symposium on Learning by Reading and Learning to Read", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tim Finin, Zareen Syed, James Mayfield, Paul Mc- Namee, and Christine Piatko. 2009. Using Wikitol- ogy for cross-document entity coreference resolution. In Proc. of the AAAI Spring Symposium on Learning by Reading and Learning to Read. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Harnessing the expertise of 70,000 human editors: Knowledge-based feature generation for text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaul", |
|
"middle": [], |
|
"last": "Markovitch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "2297--2345", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Evgeniy Gabrilovich and Shaul Markovitch. 2007. Harnessing the expertise of 70,000 human editors: Knowledge-based feature generation for text catego- rization. Journal of Machine Learning Research, 8:2297-2345, December.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Evaluating entity linking with Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hachey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Nothman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Artificial Intelligence", |
|
"volume": "194", |
|
"issue": "0", |
|
"pages": "130--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Hachey, Will Radford, Joel Nothman, Matthew Hon- nibal, and James R. Curran. 2013. Evaluating en- tity linking with Wikipedia. Artificial Intelligence, 194(0):130 -150.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Simple coreference resolution with rich syntactic and semantic features", |
|
"authors": [ |
|
{ |
|
"first": "Aria", |
|
"middle": [], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc.of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aria Haghighi and Dan Klein. 2009. Simple coreference resolution with rich syntactic and semantic features. In Proc.of the 2009 Conference on Empirical Methods in Natural Language Processing, EMNLP 09. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Clueweb09 data set", |
|
"authors": [ |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Hallan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Hoy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jamie Hallan and Mark Hoy. 2009. Clueweb09 data set. http://lemurproject.org/clueweb09/.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A generative entitymention model for linking entities with knowledge base", |
|
"authors": [ |
|
{ |
|
"first": "Xianpei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Le", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xianpei Han and Le Sun. 2011. A generative entity- mention model for linking entities with knowledge base. In Proc. of the 49th Annual Meeting of the As- sociation for Computational Linguistics: Human Lan- guage Technologies, volume 1 of ACLHLT 11. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "An entity-topic model for entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Xianpei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Le", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "105--115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xianpei Han and Le Sun. 2012. An entity-topic model for entity linking. In EMNLP-CoNLL, pages 105-115.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Named entity disambiguation by leveraging Wikipedia semantic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Xianpei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of the 18th Conference on Information and Knowledge Management, CIKM 09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "215--224", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xianpei Han and Jun Zhao. 2009. Named entity dis- ambiguation by leveraging Wikipedia semantic knowl- edge. In Proc. of the 18th Conference on Information and Knowledge Management, CIKM 09, pages 215- 224. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Collective entity linking in Web text: a graph-based method", |
|
"authors": [ |
|
{ |
|
"first": "Xianpei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Le", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the 51st Annual Meeting of the Association for Computational Linguistics, ACL 13", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xianpei Han, Le Sun, and Jun Zhao. 2011. Collective entity linking in Web text: a graph-based method. In In Proc. 34th ACM SIGIR conference on research and development in information retrieval, pages 765-774. ACM. Zhengyan He, Shujie Liu, Mu Li, Ming Zhou, Longkai Zhang, and Houfeng Wang. 2013. Learning entity representation for entity disambiguation. In Proc. of the 51st Annual Meeting of the Association for Com- putational Linguistics, ACL 13, pages 30-34.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Robust disambiguation of named entities in text", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Hoffart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [ |
|
"Amir" |
|
], |
|
"last": "Yosef", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilaria", |
|
"middle": [], |
|
"last": "Bordino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hagen", |
|
"middle": [], |
|
"last": "F\u00fcrstenau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Spaniol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bilyana", |
|
"middle": [], |
|
"last": "Taneva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Thater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johannes Hoffart, Mohamed Amir Yosef, Ilaria Bordino, Hagen F\u00fcrstenau, Manfred Pinkal, Marc Spaniol, Bilyana Taneva, Stefan Thater, and Gerhard Weikum. 2011. Robust disambiguation of named entities in text. In Proc. of the 2011 Conference on Empirical Methods in Natural Language Processing, EMNLP11. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "AIDA: accurate online disambiguation of named entities in text and tables", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Hoffart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [ |
|
"Amir" |
|
], |
|
"last": "Yosef", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilaria", |
|
"middle": [], |
|
"last": "Bordino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hagen", |
|
"middle": [], |
|
"last": "F\u00fcrstenau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Spaniol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bilyana", |
|
"middle": [], |
|
"last": "Taneva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Thater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johannes Hoffart, Mohamed Amir Yosef, Ilaria Bor- dino, Hagen F\u00fcrstenau, Manfred Pinkal, Marc Spaniol, Bilyana Taneva, Stefan Thater, and Ger- hard Weikum. 2013. AIDA: accurate on- line disambiguation of named entities in text and tables. http://www.mpi-inf.mpg.de/ yago-naga/aida/index.html.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "A scalable Gibbs sampler for probabilistic entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Neil", |
|
"middle": [], |
|
"last": "Houlsby", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimiliano", |
|
"middle": [], |
|
"last": "Ciaramita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "335--346", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neil Houlsby and Massimiliano Ciaramita. 2014. A scalable Gibbs sampler for probabilistic entity linking. In Advances in Information Retrieval, pages 335-346. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Overview of the TAC 2011 knowledge base population track", |
|
"authors": [ |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"Trang" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the 4th Text Analysis Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heng Ji, Ralph Grishman, and Hoa Trang Dang. 2011. Overview of the TAC 2011 knowledge base population track. In Proc. of the 4th Text Analysis Conference, TAC 11.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Entity linking at the tail: sparse signals, unknown entities, and phrase models", |
|
"authors": [ |
|
{ |
|
"first": "Yuzhe", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emre", |
|
"middle": [], |
|
"last": "Kiciman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuansan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricky", |
|
"middle": [], |
|
"last": "Loynd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of the 7th ACM International Conference on Web Search and Data Mining, WSDM '14", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "453--462", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuzhe Jin, Emre Kiciman, Kuansan Wang, and Ricky Loynd. 2014. Entity linking at the tail: sparse sig- nals, unknown entities, and phrase models. In Proc. of the 7th ACM International Conference on Web Search and Data Mining, WSDM '14, pages 453-462, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Entity disambiguation with hierarchical topic models", |
|
"authors": [ |
|
{

"first": "Saurabh",

"middle": [

"S"

],

"last": "Kataria",

"suffix": ""

},

{

"first": "Krishnan",

"middle": [

"S"

],

"last": "Kumar",

"suffix": ""

},

{

"first": "Rajeev",

"middle": [

"R"

],

"last": "Rastogi",

"suffix": ""

},

{

"first": "Prithviraj",

"middle": [],

"last": "Sen",

"suffix": ""

},

{

"first": "Srinivasan",

"middle": [

"H"

],

"last": "Sengamedu",

"suffix": ""

}
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the 17th ACM SIGKDD Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1037--1045", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saurabh S. Kataria, Krishnan S. Kumar, Rajeev R. Ras- togi, Prithviraj Sen, and Srinivasan H. Sengamedu. 2011. Entity disambiguation with hierarchical topic models. In Proc. of the 17th ACM SIGKDD Confer- ence on Knowledge Discovery and Data Mining, pages 1037-1045. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Collective annotation of Wikipedia entities in web text", |
|
"authors": [ |
|
{ |
|
"first": "Sayali", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ganesh", |
|
"middle": [], |
|
"last": "Ramakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumen", |
|
"middle": [], |
|
"last": "Chakrabarti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of the 15th ACM SIGKDD Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "457--466", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sayali Kulkarni, Amit Singh, Ganesh Ramakrishnan, and Soumen Chakrabarti. 2009. Collective annotation of Wikipedia entities in web text. In Proc. of the 15th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pages 457-466. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Lexical generalization in CCG grammar induction for semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharon", |
|
"middle": [], |
|
"last": "Goldwater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1512--1523", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Kwiatkowski, Luke S. Zettlemoyer, Sharon Gold- water, and Mark Steedman. 2011. Lexical generaliza- tion in CCG grammar induction for semantic parsing. In Proc. of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 1512-1523.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Entity linking at web scale", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mausam", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of the Joint Workshop on Automatic Knowledge Base Construction and Webscale Knowledge Extraction", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Lin, Mausam, and Oren Etzioni. 2012. Entity linking at web scale. In Proc. of the Joint Workshop on Automatic Knowledge Base Construction and Web- scale Knowledge Extraction, AKBC-WEKEX '12.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Building a large annotated corpus of English: The Penn Treebank", |
|
"authors": [ |
|
{

"first": "Mitchell",

"middle": [

"P"

],

"last": "Marcus",

"suffix": ""

},

{

"first": "Mary",

"middle": [

"Ann"

],

"last": "Marcinkiewicz",

"suffix": ""

},

{

"first": "Beatrice",

"middle": [],

"last": "Santorini",

"suffix": ""

}
|
], |
|
"year": 1993, |
|
"venue": "Computational linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "313--330", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitchell P Marcus, Mary Ann Marcinkiewicz, and Beat- rice Santorini. 1993. Building a large annotated cor- pus of English: The Penn Treebank. Computational linguistics, 19(2):313-330.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Cross-document coreference resolution: A key technology for learning by reading", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Mayfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Alexander", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tamer", |
|
"middle": [], |
|
"last": "Elsayed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Finin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clay", |
|
"middle": [], |
|
"last": "Fink", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjorie", |
|
"middle": [], |
|
"last": "Freedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikesh", |
|
"middle": [], |
|
"last": "Garera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Mcnamee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Oard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Piatko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "AAAI 2009 Spring Symposium on Learning by Reading and Learning to Read", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Mayfield, David Alexander, Bonnie Dorr, Jason Eisner, Tamer Elsayed, Tim Finin, Clay Fink, Marjorie Freedman, Nikesh Garera, Paul McNamee, Saif Mo- hammad, Douglas Oard, Christine Piatko, Asad Say- eed, Zareen Syed, Ralph Weischedel, Tan Xu, and David Yarowsky. 2009. Cross-document coreference resolution: A key technology for learning by read- ing. In AAAI 2009 Spring Symposium on Learning by Reading and Learning to Read.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Overview of the TAC 2012 knowledge base population track", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Mayfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Javier", |
|
"middle": [], |
|
"last": "Artiles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"Trang" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of the 5th Text Analysis Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Mayfield, Javier Artiles, and Hoa Trang Dang. 2012. Overview of the TAC 2012 knowledge base population track. In Proc. of the 5th Text Analysis Conference, TAC 12.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Wikify!: linking documents to encyclopedic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andras", |
|
"middle": [], |
|
"last": "Csomai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the 16th ACM Conference on Information and Knowledge Management, CIKM 07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "233--242", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rada Mihalcea and Andras Csomai. 2007. Wikify!: link- ing documents to encyclopedic knowledge. In Proc. of the 16th ACM Conference on Information and Knowl- edge Management, CIKM 07, pages 233-242.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learning to link with Wikipedia", |
|
"authors": [ |
|
{

"first": "David",

"middle": [

"N"

],

"last": "Milne",

"suffix": ""

},

{

"first": "Ian",

"middle": [

"H"

],

"last": "Witten",

"suffix": ""

}
|
], |
|
"year": 2008, |
|
"venue": "Proc. of the 17th ACM Conference on Information and Knowledge Management, CIKM 07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "509--518", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David N. Milne and Ian H. Witten. 2008. Learning to link with Wikipedia. In Proc. of the 17th ACM Con- ference on Information and Knowledge Management, CIKM 07, pages 509-518.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Named entity disambiguation on an ontology enriched by Wikipedia", |
|
"authors": [ |
|
{

"first": "Hien",

"middle": [

"T"

],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Tru",

"middle": [

"H"

],

"last": "Cao",

"suffix": ""

}
|
], |
|
"year": 2008, |
|
"venue": "Proc. 2008 IEEE International Conference on Research, Innovation, and Vision for the Future in Computing and Communication Technologies, RIVF 08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "247--254", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hien T. Nguyen and Tru H. Cao. 2008. Named entity disambiguation on an ontology enriched by Wikipedia. In Proc. 2008 IEEE International Conference on Re- search, Innovation, and Vision for the Future in Com- puting and Communication Technologies, RIVF 08, pages 247-254. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "AIDA-light: Highthroughput named-entity disambiguation", |
|
"authors": [ |
|
{

"first": "Dat",

"middle": [

"Ba"

],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Johannes",

"middle": [],

"last": "Hoffart",

"suffix": ""

},

{

"first": "Martin",

"middle": [],

"last": "Theobald",

"suffix": ""

},

{

"first": "Gerhard",

"middle": [],

"last": "Weikum",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "Proc. of the Linked Data on the Web Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Ba Nguyen, Johannes Hoffart, Martin Theobald, and Gerhard Weikum. 2014. AIDA-light: High- throughput named-entity disambiguation. In Proc. of the Linked Data on the Web Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Text classification from labeled and unlabeled documents using EM. Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Kamal", |
|
"middle": [], |
|
"last": "Nigam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Kachites" |
|
], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Thrun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "103--134", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kamal Nigam, Andrew Kachites McCallum, Sebastian Thrun, and Tom Mitchell. 2000. Text classification from labeled and unlabeled documents using EM. Ma- chine Learning, 39(2-3):103-134, May.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "MaltParser: A languageindependent system for data-driven dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Nilsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atanas", |
|
"middle": [], |
|
"last": "Chanev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G\u00fclsen", |
|
"middle": [], |
|
"last": "Eryigit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetoslav", |
|
"middle": [], |
|
"last": "Marinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erwin", |
|
"middle": [], |
|
"last": "Marsi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Natural Language Engineering", |
|
"volume": "13", |
|
"issue": "02", |
|
"pages": "95--135", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre, Johan Hall, Jens Nilsson, Atanas Chanev, G\u00fclsen Eryigit, Sandra K\u00fcbler, Svetoslav Marinov, and Erwin Marsi. 2007. MaltParser: A language- independent system for data-driven dependency pars- ing. Natural Language Engineering, 13(02):95-135.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Learning from big data: 40 million entities in context", |
|
"authors": [ |
|
{ |
|
"first": "Dave", |
|
"middle": [], |
|
"last": "Orr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amar", |
|
"middle": [], |
|
"last": "Subramanya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dave Orr, Amar Subramanya, and Fernando Pereira. 2013. Learning from big data: 40 million entities in context. http:// googleresearch.blogspot.com/2013/03/ learning-from-big-data-40-million. html.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "The PageRank citation ranking: bringing order to the web", |
|
"authors": [ |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Page", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Brin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajeev", |
|
"middle": [], |
|
"last": "Motwani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Winograd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lawrence Page, Sergey Brin, Rajeev Motwani, and Terry Winograd. 1999. The PageRank citation ranking: bringing order to the web. Technical Report SIDL- WP-1999-0120, Stanford InfoLab, November.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Personalized Page Rank for named entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Pershina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. 2015 Annual Conference of the North American Chapter of the ACL, NAACL HLT 14", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "238--243", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Pershina, Yifan He, and Ralph Grishman. 2015. Personalized Page Rank for named entity disambigua- tion. In Proc. 2015 Annual Conference of the North American Chapter of the ACL, NAACL HLT 14, pages 238-243.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Local and global algorithms for disambiguation to Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Lev-Arie", |
|
"middle": [], |
|
"last": "Ratinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, ACLHLT 11", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1375--1384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev-Arie Ratinov, Dan Roth, Doug Downey, and Mike Anderson. 2011. Local and global algorithms for dis- ambiguation to Wikipedia. In Proc. of the 49th Annual Meeting of the Association for Computational Linguis- tics: Human Language Technologies, ACLHLT 11, pages 1375-1384. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Re-ranking for joint named-entity recognition and linking", |
|
"authors": [ |
|
{ |
|
"first": "Avirup", |
|
"middle": [], |
|
"last": "Sil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Yates", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of the 22nd ACM International Conference on Information & Knowledge Management, CIKM 13", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2369--2374", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avirup Sil and Alexander Yates. 2013. Re-ranking for joint named-entity recognition and linking. In Proc. of the 22nd ACM International Conference on Infor- mation & Knowledge Management, CIKM 13, pages 2369-2374. ACM.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Selective context model. The mention phrase w m provides a prior distribution over possible entities for mention m. The latent variable k m selects a relevant context feature b m,km that fires for entity e m ; the remaining features are drawn from a background distribution p(b j |\u03b2 j ). The entity is represented as a latent variable here, but it is observed for the labeled training mentions.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"text": "Mention-averaged accuracy on the CoNLL 2003 dataset in our experiments and previous best work. The results of the best system are shown in bold-face.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |