|
{ |
|
"paper_id": "K19-1049", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:06:41.592086Z" |
|
}, |
|
"title": "Learning Dense Representations for Entity Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gillick", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sayali", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Larry", |
|
"middle": [], |
|
"last": "Lansing", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Presta", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Ie", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Garcia-Olano", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We show that it is feasible to perform entity linking by training a dual encoder (two-tower) model that encodes mentions and entities in the same dense vector space, where candidate entities are retrieved by approximate nearest neighbor search. Unlike prior work, this setup does not rely on an alias table followed by a re-ranker, and is thus the first fully learned entity retrieval model. We show that our dual encoder, trained using only anchor-text links in Wikipedia, outperforms discrete alias table and BM25 baselines, and is competitive with the best comparable results on the standard TACKBP-2010 dataset. In addition, it can retrieve candidates extremely fast, and generalizes well to a new dataset derived from Wikinews. On the modeling side, we demonstrate the dramatic value of an unsupervised negative mining algorithm for this task. * Equal Contributions \u2020 Work done during internship with Google arbitrary, hard cutoffs, such as only including the thirty most popular entities associated with a particular mention. We show that this configuration can be replaced with a more robust model that represents both entities and mentions in the same vector space. Such a model allows candidate entities to be directly and efficiently retrieved for a mention, using nearest neighbor search. To see why a retrieval approach is desirable, we need to consider how alias tables are employed in entity resolution systems. In the following example, Costa refers to footballer Jorge Costa, but the entities associated with that alias in existing Wikipedia text are Costa Coffee, Paul Costa Jr, Costa Cruises, and many others-while excluding the true entity. Costa has not played since being struck by the AC Milan forward... The alias table could be expanded so that last-name aliases are added for all person entities, but it is impossible to come up with rules covering all scenarios. 
Consider this harder example: ...warned Franco Giordano, secretary of the Refoundation Communists following a coalition meeting late Wednesday...", |
|
"pdf_parse": { |
|
"paper_id": "K19-1049", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We show that it is feasible to perform entity linking by training a dual encoder (two-tower) model that encodes mentions and entities in the same dense vector space, where candidate entities are retrieved by approximate nearest neighbor search. Unlike prior work, this setup does not rely on an alias table followed by a re-ranker, and is thus the first fully learned entity retrieval model. We show that our dual encoder, trained using only anchor-text links in Wikipedia, outperforms discrete alias table and BM25 baselines, and is competitive with the best comparable results on the standard TACKBP-2010 dataset. In addition, it can retrieve candidates extremely fast, and generalizes well to a new dataset derived from Wikinews. On the modeling side, we demonstrate the dramatic value of an unsupervised negative mining algorithm for this task. * Equal Contributions \u2020 Work done during internship with Google arbitrary, hard cutoffs, such as only including the thirty most popular entities associated with a particular mention. We show that this configuration can be replaced with a more robust model that represents both entities and mentions in the same vector space. Such a model allows candidate entities to be directly and efficiently retrieved for a mention, using nearest neighbor search. To see why a retrieval approach is desirable, we need to consider how alias tables are employed in entity resolution systems. In the following example, Costa refers to footballer Jorge Costa, but the entities associated with that alias in existing Wikipedia text are Costa Coffee, Paul Costa Jr, Costa Cruises, and many others-while excluding the true entity. Costa has not played since being struck by the AC Milan forward... The alias table could be expanded so that last-name aliases are added for all person entities, but it is impossible to come up with rules covering all scenarios. 
Consider this harder example: ...warned Franco Giordano, secretary of the Refoundation Communists following a coalition meeting late Wednesday...", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "A critical part of understanding natural language is connecting specific textual references to real world entities. In text processing systems, this is the task of entity resolution: given a document where certain spans of text have been recognized as mentions referring to entities, the goal is to link them to unique entries in a knowledge base (KB), making use of textual context around the mentions as well as information about the entities. (We use the term mention to refer to the target span along with its context in the document.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Real world knowledge bases are large (e.g., English Wikipedia has 5.7M articles), so existing work in entity resolution follows a two-stage approach: a first component nominates candidate entities for a given mention and a second one selects the most likely entity among those candidates. This parallels typical information retrieval systems that consist of an index and a re-ranking model. In entity resolution, the index is a table mapping aliases (possible names) to entities. Such tables need to be built ahead of time and are typically subject to It takes more sophistication to connect the colloquial expression Refoundation Communists to the Communist Refoundation Party. Alias tables cannot capture all ways of referring to entities in general, which limits recall.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Alias tables also cannot make systematic use of context. In the Costa example, the context (e.g., AC Milan forward, played) is necessary to know that this mention does not refer to a company or a psychologist. An alias table is blind to this information and must rely only on prior probabilities of entities given mention spans to manage ambiguity. Even if the correct entity is retrieved, it might have such a low prior that the re-ranking model cannot recover it. A retrieval system with access to both the mention span and its context can significantly improve recall. Furthermore, by pushing the work of the alias table into the model, we avoid manual processing and heuristics required for matching mentions to entities, which are often quite different for each new domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This work includes the following contributions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We define a novel dual encoder architecture for learning entity and mention encodings suitable for retrieval. A key feature of the architecture is that it employs a modular hierarchy of sub-encoders that capture different aspects of mentions and entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We describe a simple, fully unsupervised hard negative mining strategy that produces massive gains in retrieval performance, compared to using only random negatives.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We show that approximate nearest neighbor search using the learned representations can yield high quality candidate entities very efficiently.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our model significantly outperforms discrete retrieval baselines like an alias table or BM25, and gives results competitive with the best reported accuracy on the standard TACKBP-2010 dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We provide a qualitative analysis showing that the model integrates contextual information and world knowledge even while simultaneously managing mention-to-title similarity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We acknowledge that most of the components of our work are not novel in and of themselves. Dual encoder architectures have a long history (Bromley et al., 1994; Chopra et al., 2005; Yih et al., 2011) , including for retrieval (Gillick et al., 2018) . Negative sampling strategies have been employed for many models and applications, e.g. Shrivastava et al. (2016) . Approximate nearest neighbor search is its own sub-field of study (Andoni and Indyk, 2008) . Nevertheless, to our knowledge, our work is the first combination of these ideas for entity linking. As a result, we demonstrate the first accurate, robust, and highly efficient system that is actually a viable substitute for standard, more cumbersome twostage retrieval and re-ranking systems. In contrast with existing literature, which reports multiple seconds to resolve a single mention, we can provide strong retrieval performance across all 5.7 million Wikipedia entities in around 3ms per mention.", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 160, |
|
"text": "(Bromley et al., 1994;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 181, |
|
"text": "Chopra et al., 2005;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 199, |
|
"text": "Yih et al., 2011)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 248, |
|
"text": "(Gillick et al., 2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 363, |
|
"text": "Shrivastava et al. (2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 432, |
|
"end": 456, |
|
"text": "(Andoni and Indyk, 2008)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most recent work on entity resolution has focused on training neural network models for the candidate reranking stage (Francis-Landau et al., 2016; Eshel et al., 2017; Yamada et al., 2017a; Gupta et al., 2017; Sil et al., 2018) . In general, this work explores useful context features and novel architectures for combining mention-side and entity-side features. Extensions include joint resolution over all entities in a document (Ratinov et al., 2011; Globerson et al., 2016; Ganea and Hofmann, 2017) , joint modeling with related tasks like textual similarity (Yamada et al., 2017b; Barrena et al., 2018) and cross-lingual modeling (Sil et al., 2018) , for example.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 147, |
|
"text": "(Francis-Landau et al., 2016;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 167, |
|
"text": "Eshel et al., 2017;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 189, |
|
"text": "Yamada et al., 2017a;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 209, |
|
"text": "Gupta et al., 2017;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 227, |
|
"text": "Sil et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 452, |
|
"text": "(Ratinov et al., 2011;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 476, |
|
"text": "Globerson et al., 2016;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 501, |
|
"text": "Ganea and Hofmann, 2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 584, |
|
"text": "(Yamada et al., 2017b;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 606, |
|
"text": "Barrena et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 634, |
|
"end": 652, |
|
"text": "(Sil et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "By contrast, since we are using a two-tower or dual encoder architecture (Gillick et al., 2018; Serban et al., 2018) , our model cannot use any kind of attention over both mentions and entities at once, nor feature-wise comparisons as done by Francis-Landau et al. (2016) . This is a fairly severe constraint -for example, we cannot directly compare the mention span to the entity title -but it permits retrieval with nearest neighbor search for the entire context against a single, all encompassing representation for each entity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 95, |
|
"text": "(Gillick et al., 2018;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 116, |
|
"text": "Serban et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 271, |
|
"text": "Francis-Landau et al. (2016)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As a whole, the entity linking research space is fairly fragmented, including many task variants that make fair comparisons difficult. Some tasks include named entity recognition (mention span prediction) as well as entity disambiguation, while others are concerned only with disambiguation (the former is often referred to as endto-end. Some tasks include the problem of predicting a NIL label for mentions that do not correspond to any entity in the KB, while others ignore such cases. Still other tasks focus on named or proper noun mentions, while others include disambiguation of concepts. These variations and the resulting fragmentation of evaluation is discussed at length by Ling et al. (2015) and Hachey et al. (2013) , and partially addressed by attempts to consolidate datasets (Cornolti et al., 2013) and metrics (Usbeck et al., 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 684, |
|
"end": 702, |
|
"text": "Ling et al. (2015)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 707, |
|
"end": 727, |
|
"text": "Hachey et al. (2013)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 790, |
|
"end": 813, |
|
"text": "(Cornolti et al., 2013)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 826, |
|
"end": 847, |
|
"text": "(Usbeck et al., 2015)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Since our primary goal is to demonstrate the viability of our unified modeling approach for entity retrieval, we choose to focus on just the disambiguation task, ignoring NIL mentions, where our set of entity candidates includes every entry in the English Wikipedia.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In addition, some tasks include relevant training data, which allows a model trained on Wikipedia (for example) to be tuned to the target domain. We save this fine-tuning for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Training data Wikipedia is an ideal resource for training entity resolution systems because many mentions are resolved via internal hyperlinks (the mention span is the anchor text). We use the 2018-10-22 English Wikipedia dump, which includes 5.7M entities and 112.7M linked mentions (labeled examples). We partition this dataset into 99.9% for training and the remainder for model selection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Since Wikipedia is a constantly growing an evolving resource, the particular version used can significantly impact entity linking results. For example, when the TACKBP-2010 evaluation dataset was published, Wikipedia included around 3M entities, so the number of retrieval candidates has increased by nearly two times. While this does mean new contexts are seen for many entities, it also means that retrieval gets more difficult over time. This is another factor that makes fair comparisons challenging.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Evaluation data There are a number of annotated datasets available for evaluating entity linking systems. Given the choices discussed above, the TACKBP-2010 dataset 1 is the most widely used evaluation that matches our constraints and allows us to compare to a reasonable variety of prior work. It includes 1020 annotated mention/entity pairs derived from 1013 original news and web documents. While there is a related development set associated with this evaluation set, we do not use it for any fine-tuning, as explained above.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To further validate our results, we also include a new evaluation set called Wikinews, which includes news pages from Wikinews 2 in English for the year 2018. It includes 2263 annotated mention/entity pairs derived from 1801 original documents. Because we pulled these documents at the same time as the Wikipedia dump, the entity annotations are consistent with our training set and have not been subject to the kind of gradual rot that befalls older evaluation data as the updated KB diverges from the annotations. This data is available here: https://github. com/google-research/google-research/ tree/master/dense_representations_ for_entity_retrieval/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use nearest neighbor search to retrieve entities based on a mention in context, after learning dense, fixedlength vector representations of each.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity retrieval model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The dual encoder is a two-tower architecture suitable for retrieval (Gillick et al., 2018) . It has one network structure for encoding mentions (including their contexts), a second for encoding entities (including KB features), and a cosine function to compute similarity between representations (which must be the same dimension). A key property of this architecture is that there is no direct interaction between the encoders on each side. This enables efficient retrieval, but constrains the set of allowable network structures. The dual encoder learns a mention encoder \u03c6 and an entity encoder \u03c8, where the score of a mention-entity pair (m, e) defined as: s(m, e) = cos(\u03c6(m), \u03c8(e))", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 90, |
|
"text": "(Gillick et al., 2018)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dual Encoder model", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(1) Figure 1 shows the full model architecture and the feature inputs to each encoder. We use a compound encoder (Figure 1d ) to add useful sub-structure to each tower. The mention-side encoder first combines the context features, and then combines the result with the mention span encoding. Similarly, the entity-side encoder first combines the entity paragraph and categories, and then combines the result with the entity title encoding.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 12, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 123, |
|
"text": "(Figure 1d", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dual Encoder model", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The mention encoder uses four text features to capture both the span text and the textual context surrounding it. The context features include the five tokens immediately to the left and right of the span. In the sentence feature, the mention span is replaced by a special symbol.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dual Encoder model", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The entity encoder uses the entity's title and the first paragraph of its Wikipedia page as text features. It additionally incorporates the unedited user-specified categories associated with the entity. We do not use the entity IDs as features so that the model generalizes more easily to new entities unseen at training time. In fact, more than 1M candidate entities available at retrieval time have no associated training examples, but this architecture allows these to be encoded using their feature representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dual Encoder model", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "A shared embedding look-up is used for all text features ( Figure 1b ). Specifically, we embed all unigrams and bigrams to get 300-dimensional averaged unigram embeddings and 300-dimensional averaged bigram embeddings for each text feature. Unigram embeddings are initialized from GloVe vectors (Pennington et al., 2014) , and we use 5M hash buckets for out-of-vocabulary unigrams and bigrams (Ganchev and Dredze, 2008) . These averaged embeddings are concatenated and then passed through a feed-forward layer. For the category features, each entity category name is treated as a sparse input, and the embeddings for all categories for an entity are averaged to produce a 300-dimensional representation, which in turn is passed through a feed-forward layer (Figure 1c) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 295, |
|
"end": 320, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 419, |
|
"text": "(Ganchev and Dredze, 2008)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 68, |
|
"text": "Figure 1b", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 757, |
|
"end": 768, |
|
"text": "(Figure 1c)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dual Encoder model", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Our experiments show that this architecture is highly effective for both retrieval and resolution. Nevertheless, we expect that additional modeling ideas will further improve performance, especially for resolution. Recent work such as Durrett and Klein (2014) has shown improvements derived from better, longer-range, context features; similarly, there are many more potentially useful KB-derived features. More complex encoder architectures that use some form of attention over the input tokens and features could also be beneficial.", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 259, |
|
"text": "Durrett and Klein (2014)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dual Encoder model", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Our training data is described in Section 3. The inputs to the entity encoder are constructed from the true entity referred by the landing page. The inputs to the mention encoder are constructed from the source page, using the mention span and surrounding context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "These pairs constitute only positive examples, so we use in-batch random negatives (Henderson et al., 2017; Gillick et al., 2018) : for each mention-entity pair in a training batch, the other entities in the batch are used as negatives. Computationally, this amounts to building the all-pairs similarity matrix for all mentions and entities in a batch. We optimize softmax loss on each row of the matrix, so that the model is trained to maximize the score of the correct entity with respect to random entities. This is a version of the sampled softmax (Jozefowicz et al., 2016), which we use in place of the full softmax because the normalization term is intractable to compute over all 5.7M entities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 107, |
|
"text": "(Henderson et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 108, |
|
"end": 129, |
|
"text": "Gillick et al., 2018)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The softmax loss is not directly applied to the raw cosine similarities. Instead, a scalar multiplier a is learned to map the similarities (in the range [\u22121, 1]) to unbounded logits. For each training pair (m i , e i ) in a batch of B pairs, the loss is computed as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L(m i , e i ) = \u2212f (m i , e i ) + log B j=1 exp(f (m i , e j )) (2) where f (m i , e j ) = a \u2022 s(m i , e j )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Training", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We track in-batch recall@1 (accuracy) on the held out set during training. For each instance, the model gets a score of 1 if the correct entity is ranked above all in-batch random negatives, 0 otherwise. We stop training after the metric flattens out (about 40M steps).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For all experiments, we use a batch size of 100, standard SGD with Momentum of 0.9 and a fixed learning rate 0.01.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Our aim here is to demonstrate a pure retrieval system, so we train our models solely from Wikipedia and refrain from tuning them explicitly on in-domain documents from the evaluation tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Random negatives alone are not enough to train an accurate entity resolution model because scoring the correct entity above random alternatives can typically be achieved just by comparing the mention text and entity title. More challenging negative examples must be introduced to force the model to exploit context. This strategy is somewhat akin to Importance Sampling (Bengio et al., 2003) , for example.", |
|
"cite_spans": [ |
|
{ |
|
"start": 370, |
|
"end": 391, |
|
"text": "(Bengio et al., 2003)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "After learning an initial model using random negatives, we identify hard negatives via the following steps:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "1. Encode all mentions and entities found in training pairs using the current model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "2. For each mention, retrieve the most similar 10 entities (i.e., its nearest neighbors).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "3. Select all entities that are ranked above the correct one for the mention as negative examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "This yields new negative mention/entity pairs for which the model assigns a high score. It crucially relies on the fact that there is just one correct entity, unlike other tasks that consider general similarity or relatedness (and which are well served by random negatives). For example, negatives mined in this way for paraphrasing or image captioning tasks could actually turn out to be positives that were not explicitly labeled in the data. It is precisely because the distribution over candidate entities that match a contextualized mention tends to have such low entropy that makes negative mining such a good fit for this task. After merging these with the original positive pairs to construct a classification task, we resume training the initial dual encoder model using logistic loss on this new set of pairs. To retain good performance on random negatives, the new task is mixed with the original softmax task in a multi-task learning setup in which the two loss functions are combined with equal weight and optimized together. For a pair (m, e) with label y \u2208 {0, 1}, the hard negative loss is defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L h (m, e; y) = \u2212 y \u2022 log f (m, e) \u2212 (1 \u2212 y) \u2022 log(1 \u2212 f (m, e))", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f (m, e) = g(a h \u2022 s(m, e) + b h )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Here, g(x) = 1/(1 + e \u2212x ) is the logistic function, and a h and b h are learned scalar parameters to transform the cosine similarity into a logit. 3 For the hard negatives task, we track Area Under the ROC curve (AUC) on a held out set of pairs. We stop training when the average of the evaluation metrics for the two tasks stabilizes (about 5M steps).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Finally, we iteratively apply this negative mining procedure. For each round, we mine negatives from the current model as described above and then append the new hard examples to the classification task. Thus each subsequent round of negative mining adds fewer and fewer new examples, which yields a stable and naturally convergent process. As we show in our experiments, iterative hard negative mining produces large performance gains.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard negative mining", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Once the model is trained, we use the entity encoder to pre-compute encodings for all candidate entities (including those that do not occur in training). At prediction time, mentions are encoded by the mention encoder and entities are retrieved based on their cosine similarity. Since our focus is on model training, we use brute-force search in our evaluation. However, for online settings and larger knowledge bases, an approximate search algorithm is required. In Section 5.2 we show that, when using approximate search, the system retains its strong performance while obtaining a nearly 100x speedup on an already fast retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We demonstrate our model performance as compared to a baseline alias table. As is standard, it is built by counting all (mention span, entity) pairs in the training data. The counts are used to estimate prior probabilities P (e|m) of an entity given a mention span (alias); for each entity, the aliases are ordered according to these priors and limited to the top 100 candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "P (e|m) = count(e, m)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "\u2211_{e' \u2208 E} count(e', m)",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Here, count(e, m) is the number of occurrences of mention m linked to entity e, and E is the set of all entities appearing in the data. Since alias table construction is often extended with various heuristics, we also include a variant that includes unigrams and bigrams of the mention text as additional aliases. This can help when the entities (specifically person names) are referenced as last/first name at inference time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Finally, since we are primarily concerned with demonstrating performance of a retrieval system (as opposed", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "R@1 Entities AT-Prior 71.9 5.7M AT-Ext 73.3 5.7M Chisholm and Hachey (2015) 80.7 800K He et al. (2013) 81.0 1.5M Sun et al. (2015) 83.9 818K Yamada et al. (2016) 85.2 5.0M Nie et al. (2018) 86.4 5.0M Barrena et al. (2018) 87.3 523K DEER (this work) 87.0 5.7M Table 1 : Comparison of relevant TACKBP-2010 results using Recall@1 (accuracy). While we cannot control the candidate entity set sizes, we attempt to approximate them here.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 102, |
|
"text": "He et al. (2013)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 130, |
|
"text": "Sun et al. (2015)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 161, |
|
"text": "Yamada et al. (2016)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 189, |
|
"text": "Nie et al. (2018)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 221, |
|
"text": "Barrena et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 259, |
|
"end": 266, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "to a re-ranking system) or a combination of the two, we include results using the standard BM25 retrieval algorithm (the Gensim implementation 4 ). We found that indexing each entity using its title gave much better results than indexing with the first paragraph text (or the full document text). We measure recall@k (R@k), defined as the proportion of instances where the true entity is in the top k retrieved items. We report R@1 (accuracy of the top retrieved result), which is standard for TAC/KBP-2010, as well as R@100, which better captures overall retrieval performance.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We refer to the models with these abbreviations:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 AT-Prior: The alias table ordered by P (e|m).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 AT-Ext: The heuristically extended alias table.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 BM25: The BM25 retrieval algorithm, where each entity is indexed using its title.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 DEER: Our Dual Encoder for Entity Resolution, as described in section 4. 2018report 89.1%), we limit comparisons to local resolution. We also limit comparisons to systems that ignore NIL mentions (referred to as in-KB accuracy), so all those reported in the table evaluate precisely the same set of mentions. As noted earlier, we cannot control the candidate sets used in each of these experiments, and we are at some disadvantage given our larger set of candidates. Resolution performance To put DEER's architecture and performance in context, we compare it with prior work in some more detail here. He et al. (2013) use a dual encoder setup to train a re-ranker, but start with unsupervised training to build representations of contexts and entities using Denoising Autoencoders. They use an alias table for candidate generation, and then train a ranking model using mentionspecific batching to obtain hard in-batch negatives. Our results suggest that the autoencoder pretraining is not necessary and that our unsupervised negative mining can outperform heuristic selection of negatives. Sun et al. (2015) also use a dual encoder that has similar structure to ours. Like He et al. (2013) , they use it to score entries from an alias table rather than directly for retrieval. Their mention encoder is a considerably more complex combination of mention and context rather than the simple compounding strategy in our architecture. Their alias table method not only maps mentions to entities, but also uses additional filters to reduce the set of candidate entities based on words in the mention's context. They report that this method has a recall of 91.2% on TACKBP 2010, while our direct retrieval setup gives Recall@100 of 96.3% (see Table 2 ). 
They train their representations for each true mentionentity pair against a single random negative entity for the mention, whereas our method takes advantage of the entire batch of random negatives as well further refinement through hard negative mining. Yamada et al. (2016) use an alias table derived from the December 2014 Wikipedia dump, restricted to the fifty most popular entities per mention. They tune their model on the TACKBP 2010 training set. Architecturally, they include features that capture the alias table priors and string similarities, both of which are not feasible in a dual encoder configuration that precludes direct comparison between mention-and entity-side features. DEER's better results indicate that learned representations of mentions and entities can be powerful enough for entity retrieval even without any cross-attention. Nie et al. (2018) define a complex model that uses both entity type information and attention between the mention string and the entity description. To augment the small 1500 example training data in TACKBP, they Table 3 : Comparison of nearest-neighbor search methods using the DEER model. The benchmark was conducted on a single machine. AH indicates quantizationbased asymmetric hashing; AH+Tree adds an initial tree search to further reduce the search space.", |
|
"cite_spans": [ |
|
{ |
|
"start": 603, |
|
"end": 619, |
|
"text": "He et al. (2013)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1092, |
|
"end": 1109, |
|
"text": "Sun et al. (2015)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1175, |
|
"end": 1191, |
|
"text": "He et al. (2013)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 2004, |
|
"end": 2024, |
|
"text": "Yamada et al. (2016)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 2606, |
|
"end": 2623, |
|
"text": "Nie et al. (2018)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1738, |
|
"end": 1745, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2819, |
|
"end": 2826, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "also collected 55k mentions found in Wikipedia that were active in TACKBP 2010 to train this model. DEER is simply trained over all entities in Wikipedia and uses no cross-attention or explicit type information, yet delivers better resolution performance. Most standard entity linking models build a single ranking model on top of the candidate set provided by an alias table. Barrena et al. (2018) instead train 523k mention-specific deep classifiers-effectively treating entity linking as a special form of word sense disambiguation. They do this by pre-training a single LSTM that predicts among 248k mentions, and then the parameters of this model are used to warm start each of the 523k mention-specific models. In doing so, they learn an effective context encoding, and can then fine-tune each mention model to discriminate among the small set of popular candidate entities for the mention (their alias table uses a cutoff of the thirty most popular entities for each mention). DEER in contrast, has a single mention encoder that is simple and fast, and performs nearly equivalently while retrieving from a much larger set of entities. Tables 1 and 2 report performance using brute force nearest-neighbor search. That is, we score each mention against all 5.7M entities to get the top k neighbors. However, a major motivation for using a single-stage retrieval model is that it can allow scaling to much larger knowledge bases by reducing retrieval time via approximate search.", |
|
"cite_spans": [ |
|
{ |
|
"start": 377, |
|
"end": 398, |
|
"text": "Barrena et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1142, |
|
"end": 1156, |
|
"text": "Tables 1 and 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Retrieval performance", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To estimate performance in a real-world setting, we repeat the evaluation of DEER using the quantizationbased approaches described by Guo et al. (2016) . Table 3 shows the trade-off between search time and recall on Wikinews. Compared to brute force, search time can be reduced by an order of magnitude with a small loss in R@100, or by two orders of magnitude while losing less than 3 points. This is crucial for scaling the approach to even larger KBs and supporting the latency requirements of real-world applications. Figure 2 shows the improvement in Recall@1 from each round of hard negative mining. The first iteration gives a large improvement over the initial round of training with only random negatives. Successive iterations yield further gains, eventually flattening out. Our strategy of appending each new set of hard negatives to the previously mined ones means that each new set has proportionately less influence-this trades off some opportunity for improving the model in favor of stability.",
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 151, |
|
"text": "Guo et al. (2016)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 161, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 530, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Approximate search", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here, we show a variety of examples to elucidate how DEER effectively models context, and to provide intuition for the learned entity representations. First, we compare entities retrieved by DEER with those retrieved by the alias table baseline. Table 5 shows some instances where the alias table does not contain the correct entity for a given mention text (in the top 100 neighbors) or it fails to return any entity at all. In all of these cases, it is clear that context is essential. While a scoring stage is intended to leverage context, it is limited by the set of retrieved entities; our approach uses context directly.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 253, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative analysis", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "For example, mentions like Costa and Justin are linked to the correct entities in the alias table, but with such low prior probability that we would need to retrieve far more than the top 100 entities to consider them. At the other extreme, mentions like Refoundation Communists and European EADS are missed by the baseline because they don't have direct string matches in the alias table. Additional extensions to our alias table allowing token re-ordering could help catch the former (though this might reduce precision too much), but it's unlikely that any alias table could connect European EADS with Airbus in the absence of an explicit anchor-text link. This example helps highlight how a fully learned retrieval model can generalize to new data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative analysis", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Second, Table 6 shows more directly how modifying the context around a mention span changes the retrieved entities. For example, the model correctly differentiates between Phoenix the city, Phoenix the band, and Phoenix in mythology based on the sentence surrounding an identical mention span.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 15, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative analysis", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Third, since our model produces encodings for all 5.7M entities, we can retrieve nearest neighbors for any Table 4 . The model tends to prefer related entities of the same type, and often ones that share portions of their names, probably because entity titles are so important to linking with mentions. The nearest neighbors for Jorge Costa, our running example, include a variety of retired Portuguese football players, many of whom have Costa in their names. Finally, Figure 3 is a t-SNE projection of the entity encodings for a selection of cities, bands, and people (nobel literature winners). The cities and bands were chosen to have high word overlap, e.g. Montreal (city) and Of Montreal (band), to demonstrate how our entity embeddings differ from standard word embeddings. Note also the sub-clusters that form within each type cluster. Latin American authors cluster together, as do the Existentialists; the cities have some geographical proximity, though Brazil and Portugal are neighbors, presumably because of shared language and culture.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 114, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 478, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative analysis", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Our results with DEER show that a single-stage retrieval approach for entities from mentions is highly effective: without any domain-specific tuning, it performs at least as well as the best comparable two-stage systems. While our bag-of-ngrams encoders provided a strong proof of concept, we can almost certainly improve results with more sophisticated encoders, using a BERT architecture (Devlin et al., 2019) , for example. Further, by virtue of approximate search techniques, it can be used for very Mention Model predictions From 1996, Cobra was brewed under contract by Charles Wells Ltd and experienced strong growth in sales for the next ten years.", |
|
"cite_spans": [ |
|
{ |
|
"start": 390, |
|
"end": 411, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The Cobra Group, Cobra Beer, Cobra (Tivoli Friheden) Guys fondly remembered Cobra -the band from Memphis featuring Jimi Jamison and Mandy Meyer who released one Album -Frist Strike -before the Band split! Cobra (American band), Wadsworth Jarrell, Cobra Records, Cobra (Japanese band) Since the late 18th century, Paris has been famous for its restaurants and haute cuisine, food meticulously prepared and artfully presented.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Kim Kardashian may be a household name now, but that wasn't always the case\u2014and it may all be because of pal Paris.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paris, Nice, Bucharest", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Paris, Paris Hilton, Paris syndrome Rory and Paris are the only two people on Gilmore Girls who share the same goals. Paris, Paris (mythology), Paris Geller Texas was finally annexed when the expansionist James K. Polk won the election of 1844 who ordered General Zachary Taylor south to the Rio Grande on January 13, 1846.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paris, Nice, Bucharest", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Fronted by Sharleen Spiteri, Texas have released eight studio albums and are known for songs such as 'I Don't Want a Lover', 'Say What You Want', 'Summer Son' and 'Inner Smile' Texas (band), Texas, Tich (singer) There is an amazing piece of historic architecture set in downtown Phoenix that was build in 1929 in the Spanish Baroque style and features intricate murals and moldings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Texas, Texas annexation, Texas in the American Civil War", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Phoenix once again played another late night show, now they have Late Night with Jimmy Fallon where they played a great rendition of 'Lisztomania' Phoenix (band), Joaquin Phoenix, Phoenix, Arizona According to Greek mythology, the Phoenix lived in Arabia next to a well where the Greek sun-god Apollo stopped his chariot in order to listen to its song. Phoenix (mythology), Phoenix (son of Amyntor), Phoenix (son of Agenor) Table 6 : Changing the context around a mention span changes the mention encoding, and thus the set of retrieved neighbors. Figure 3 : A 2D projection of cities, bands, and people embeddings (using t-SNE), color coded by their category.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 424, |
|
"end": 431, |
|
"text": "Table 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 548, |
|
"end": 556, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Phoenix, Arizona, Prescott, Arizona", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "fast retrieval, and is likely to scale reasonably to much larger knowledge bases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phoenix, Arizona, Prescott, Arizona", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also note that the dual encoder approach allows for interesting extensions beyond traditional entity linking. For example, the context encodings provide a natural model for building entity expectations during text processing, such that entities relevant to the context can be retrieved and used for reference resolution as a document is processed incrementally. We expect this will be useful for collective entity resolution as well as modeling coherence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phoenix, Arizona, Prescott, Arizona", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finally, while we focus on training with English Wikipedia, Sil et al. (2018) show that using cross-lingual datasets can help to refine the context information more effectively. Since English constitutes only a fraction of the total Wikipedia, and entity IDs are (mostly) language-independent, there is great opportunity to extend this work to far more training examples across far more languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 77, |
|
"text": "Sil et al. (2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Phoenix, Arizona, Prescott, Arizona", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://tac.nist.gov/ 2 https://en.wikinews.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The additive parameter is only needed for the logistic loss component, as the softmax function is invariant to translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://radimrehurek.com/gensim/ summarization/bm25.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The authors would like to thank Ming-Wei Chang, Jan Botha, Slav Petrov, and the anonymous reviewers for their helpful comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Near-optimal hashing algorithms for approximate nearest neighbor in high dimensions", |
|
"authors": [ |
|
{ |
|
"first": "Alexandr", |
|
"middle": [], |
|
"last": "Andoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Indyk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Communications of the ACM", |
|
"volume": "51", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandr Andoni and Piotr Indyk. 2008. Near-optimal hashing algorithms for approximate nearest neighbor in high dimensions. Communications of the ACM 51(1):117.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Learning text representations for 500k classification tasks on named entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Ander", |
|
"middle": [], |
|
"last": "Barrena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Soroa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "171--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ander Barrena, Aitor Soroa, and Eneko Agirre. 2018. Learning text representations for 500k classification tasks on named entity disambiguation. In CoNLL. pages 171-180.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Quick training of probabilistic neural nets by importance sampling", |
|
"authors": [ |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean-S\u00e9bastien", |
|
"middle": [], |
|
"last": "Sen\u00e9cal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "AISTATS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoshua Bengio, Jean-S\u00e9bastien Sen\u00e9cal, et al. 2003. Quick training of probabilistic neural nets by impor- tance sampling. In AISTATS. pages 1-9.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Signature verification using a\" siamese\" time delay neural network", |
|
"authors": [ |
|
{ |
|
"first": "Jane", |
|
"middle": [], |
|
"last": "Bromley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Guyon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "S\u00e4ckinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roopak", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "737--744", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jane Bromley, Isabelle Guyon, Yann LeCun, Eduard S\u00e4ckinger, and Roopak Shah. 1994. Signature verifi- cation using a\" siamese\" time delay neural network. In Advances in Neural Information Processing Sys- tems. pages 737-744.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Entity disambiguation with web links", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Chisholm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hachey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the ACL", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "145--156", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Chisholm and Ben Hachey. 2015. Entity disam- biguation with web links. Transactions of the ACL 3:145-156.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Learning a similarity metric discriminatively, with application to face verification", |
|
"authors": [ |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raia", |
|
"middle": [], |
|
"last": "Hadsell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "CVPR. IEEE", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "539--546", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sumit Chopra, Raia Hadsell, and Yann LeCun. 2005. Learning a similarity metric discriminatively, with application to face verification. In CVPR. IEEE, vol- ume 1, pages 539-546.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A framework for benchmarking entityannotation systems", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Cornolti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Ferragina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimiliano", |
|
"middle": [], |
|
"last": "Ciaramita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 22nd international conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "249--260", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Cornolti, Paolo Ferragina, and Massimiliano Cia- ramita. 2013. A framework for benchmarking entity- annotation systems. In Proceedings of the 22nd inter- national conference on World Wide Web. ACM, pages 249-260.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In NAACL-HLT. pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A joint model for entity analysis: Coreference, typing, and linking", |
|
"authors": [ |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Transactions of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Greg Durrett and Dan Klein. 2014. A joint model for entity analysis: Coreference, typing, and linking. In Transactions of the ACL.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Named entity disambiguation for noisy text", |
|
"authors": [ |
|
{ |
|
"first": "Yotam", |
|
"middle": [], |
|
"last": "Eshel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kira", |
|
"middle": [], |
|
"last": "Radinsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaul", |
|
"middle": [], |
|
"last": "Markovitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ikuya", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "58--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yotam Eshel, Noam Cohen, Kira Radinsky, Shaul Markovitch, Ikuya Yamada, and Omer Levy. 2017. Named entity disambiguation for noisy text. In CoNLL. Vancouver, Canada, pages 58-68.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Capturing semantic similarity for entity linking with convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Francis-Landau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1256--1261", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Francis-Landau, Greg Durrett, and Dan Klein. 2016. Capturing semantic similarity for entity linking with convolutional neural networks. In NAACL-HLT. San Diego, California, pages 1256-1261.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Small statistical models by random feature mixing", |
|
"authors": [ |
|
{ |
|
"first": "Kuzman", |
|
"middle": [], |
|
"last": "Ganchev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the ACL-08: HLT Workshop on Mobile Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kuzman Ganchev and Mark Dredze. 2008. Small sta- tistical models by random feature mixing. In Pro- ceedings of the ACL-08: HLT Workshop on Mobile Language Processing. pages 19-20.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Deep joint entity disambiguation with local neural attention", |
|
"authors": [ |
|
{ |

"first": "Octavian-Eugen", |

"middle": [], |

"last": "Ganea", |

"suffix": "" |

}, |

{ |

"first": "Thomas", |

"middle": [], |

"last": "Hofmann", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Octavian-Eugen Ganea and Thomas Hofmann. 2017. Deep joint entity disambiguation with local neural attention. CoRR abs/1704.04920.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "End-to-end retrieval in continuous space", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gillick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Presta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav Singh", |
|
"middle": [], |
|
"last": "Tomar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Gillick, Alessandro Presta, and Gaurav Singh Tomar. 2018. End-to-end retrieval in continuous space .", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Collective entity resolution with multi-focal attention", |
|
"authors": [ |
|
{ |

"first": "Amir", |

"middle": [], |

"last": "Globerson", |

"suffix": "" |

}, |

{ |

"first": "Nevena", |

"middle": [], |

"last": "Lazic", |

"suffix": "" |

}, |

{ |

"first": "Soumen", |

"middle": [], |

"last": "Chakrabarti", |

"suffix": "" |

}, |

{ |

"first": "Amarnag", |

"middle": [], |

"last": "Subramanya", |

"suffix": "" |

}, |

{ |

"first": "Michael", |

"middle": [], |

"last": "Ringaard", |

"suffix": "" |

}, |

{ |

"first": "Fernando", |

"middle": [], |

"last": "Pereira", |

"suffix": "" |

} |
|
], |
|
"year": 2016, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Globerson, Nevena Lazic, Soumen Chakrabarti, Amarnag Subramanya, Michael Ringaard, and Fer- nando Pereira. 2016. Collective entity resolution with multi-focal attention. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Quantization based fast inner product search", |
|
"authors": [ |
|
{ |
|
"first": "Ruiqi", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjiv", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krzysztof", |
|
"middle": [], |
|
"last": "Choromanski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Simcha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Artificial Intelligence and Statistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "482--490", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruiqi Guo, Sanjiv Kumar, Krzysztof Choromanski, and David Simcha. 2016. Quantization based fast inner product search. In Artificial Intelligence and Statis- tics. pages 482-490.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Entity linking via joint encoding of types, descriptions, and context", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2681--2690", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Gupta, Sameer Singh, and Dan Roth. 2017. En- tity linking via joint encoding of types, descriptions, and context. In EMNLP. pages 2681-2690.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Evaluating entity linking with wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hachey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Nothman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James R", |
|
"middle": [], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Artificial intelligence", |
|
"volume": "194", |
|
"issue": "", |
|
"pages": "130--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Hachey, Will Radford, Joel Nothman, Matthew Honnibal, and James R Curran. 2013. Evaluating entity linking with wikipedia. Artificial intelligence 194:130-150.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Learning entity representation for entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Zhengyan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shujie", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Longkai", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhengyan He, Shujie Liu, Mu Li, Ming Zhou, Longkai Zhang, and Houfeng Wang. 2013. Learning entity representation for entity disambiguation. In ACL. Sofia, Bulgaria, pages 30-34.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Efficient natural language response suggestion for smart reply", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rami", |
|
"middle": [], |
|
"last": "Al-Rfou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Strope", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunhsuan", |
|
"middle": [], |
|
"last": "Sung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laszlo", |
|
"middle": [], |
|
"last": "Lukacs", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Ruiqi", |

"middle": [], |

"last": "Guo", |

"suffix": "" |

}, |

{ |

"first": "Sanjiv", |

"middle": [], |

"last": "Kumar", |

"suffix": "" |

}, |

{ |

"first": "Balint", |

"middle": [], |

"last": "Miklos", |

"suffix": "" |

}, |

{ |

"first": "Ray", |

"middle": [], |

"last": "Kurzweil", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1705.00652" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Henderson, Rami Al-Rfou, Brian Strope, Yun- hsuan Sung, Laszlo Lukacs, Ruiqi Guo, Sanjiv Ku- mar, Balint Miklos, and Ray Kurzweil. 2017. Effi- cient natural language response suggestion for smart reply. arXiv preprint arXiv:1705.00652 .", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Dynamic entity representations in neural language models", |
|
"authors": [ |
|
{ |
|
"first": "Yangfeng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenhao", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Martschat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yangfeng Ji, Chenhao Tan, Sebastian Martschat, Yejin Choi, and Noah A. Smith. 2017. Dynamic entity representations in neural language models. CoRR abs/1708.00781.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Exploring the limits of language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Rafal", |
|
"middle": [], |
|
"last": "Jozefowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1602.02410" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rafal Jozefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, and Yonghui Wu. 2016. Exploring the limits of language modeling. arXiv preprint arXiv:1602.02410 .", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Design challenges for entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Weld", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the ACL", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "315--328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Ling, Sameer Singh, and Daniel S Weld. 2015. Design challenges for entity linking. Transactions of the ACL 3:315-328.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Mention and entity description co-attention for entity disambiguation", |
|
"authors": [ |
|
{ |

"first": "Feng", |

"middle": [], |

"last": "Nie", |

"suffix": "" |

}, |

{ |

"first": "Yunbo", |

"middle": [], |

"last": "Cao", |

"suffix": "" |

}, |

{ |

"first": "Jinpeng", |

"middle": [], |

"last": "Wang", |

"suffix": "" |

}, |

{ |

"first": "Chin-Yew", |

"middle": [], |

"last": "Lin", |

"suffix": "" |

}, |

{ |

"first": "Rong", |

"middle": [], |

"last": "Pan", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5908--5915", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Feng Nie, Yunbo Cao, Jinpeng Wang, Chin-Yew Lin, and Rong Pan. 2018. Mention and entity descrip- tion co-attention for entity disambiguation. In AAAI. Vancouver, Canada, pages 5908-5915.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word representation. In In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Local and global algorithms for disambiguation to wikipedia", |
|
"authors": [ |
|
{ |

"first": "Lev", |

"middle": [], |

"last": "Ratinov", |

"suffix": "" |

}, |

{ |

"first": "Dan", |

"middle": [], |

"last": "Roth", |

"suffix": "" |

}, |

{ |

"first": "Doug", |

"middle": [], |

"last": "Downey", |

"suffix": "" |

}, |

{ |

"first": "Mike", |

"middle": [], |

"last": "Anderson", |

"suffix": "" |

} |
|
], |
|
"year": 2011, |
|
"venue": "ACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1375--1384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev Ratinov, Dan Roth, Doug Downey, and Mike An- derson. 2011. Local and global algorithms for disam- biguation to wikipedia. In ACL-HLT. Stroudsburg, PA, USA, pages 1375-1384.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A survey of available corpora for building data-driven dialogue systems: The journal version", |
|
"authors": [ |
|
{ |
|
"first": "Iulian", |
|
"middle": [], |
|
"last": "Vlad Serban", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Lowe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Charlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joelle", |
|
"middle": [], |
|
"last": "Pineau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "D&D", |
|
"volume": "9", |
|
"issue": "1", |
|
"pages": "1--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iulian Vlad Serban, Ryan Lowe, Peter Henderson, Lau- rent Charlin, and Joelle Pineau. 2018. A survey of available corpora for building data-driven dialogue systems: The journal version. D&D 9(1):1-49.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Training region-based object detectors with online hard example mining", |
|
"authors": [ |
|
{ |
|
"first": "Abhinav", |
|
"middle": [], |
|
"last": "Shrivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhinav", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ross", |
|
"middle": [], |
|
"last": "Girshick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "761--769", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhinav Shrivastava, Abhinav Gupta, and Ross Gir- shick. 2016. Training region-based object detectors with online hard example mining. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pages 761-769.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Neural cross-lingual entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Avirup", |
|
"middle": [], |
|
"last": "Sil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gourab", |
|
"middle": [], |
|
"last": "Kundu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Florian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wael", |
|
"middle": [], |
|
"last": "Hamza", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5465--5472", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avirup Sil, Gourab Kundu, Radu Florian, and Wael Hamza. 2018. Neural cross-lingual entity linking. In AAAI. Vancouver, Canada, pages 5465-5472.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Modeling mention, context and entity with neural networks for entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Yaming", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duyu", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenzhou", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaolong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Twenty-Fourth International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yaming Sun, Lei Lin, Duyu Tang, Nan Yang, Zhenzhou Ji, and Xiaolong Wang. 2015. Modeling mention, context and entity with neural networks for entity disambiguation. In Twenty-Fourth International Joint Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Proceedings of the 24th international conference on World Wide Web", |
|
"authors": [ |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Usbeck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "R\u00f6der", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Axel-Cyrille Ngonga", |
|
"middle": [], |
|
"last": "Ngomo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ciro", |
|
"middle": [], |
|
"last": "Baron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Both", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Br\u00fcmmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Ceccarelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Cornolti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Didier", |
|
"middle": [], |
|
"last": "Cherix", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernd", |
|
"middle": [], |
|
"last": "Eickmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1133--1143", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ricardo Usbeck, Michael R\u00f6der, Axel-Cyrille Ngonga Ngomo, Ciro Baron, Andreas Both, Martin Br\u00fcmmer, Diego Ceccarelli, Marco Cornolti, Didier Cherix, Bernd Eickmann, et al. 2015. In Proceedings of the 24th international conference on World Wide Web. pages 1133-1143.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Joint learning of the embedding of words and entities for named entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Ikuya", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroyuki", |
|
"middle": [], |
|
"last": "Shindo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideaki", |
|
"middle": [], |
|
"last": "Takeda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshiyasu", |
|
"middle": [], |
|
"last": "Takefuji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1601.01343" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ikuya Yamada, Hiroyuki Shindo, Hideaki Takeda, and Yoshiyasu Takefuji. 2016. Joint learning of the em- bedding of words and entities for named entity dis- ambiguation. arXiv preprint arXiv:1601.01343 .", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learning distributed representations of texts and entities from knowledge base", |
|
"authors": [ |
|
{ |
|
"first": "Ikuya", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroyuki", |
|
"middle": [], |
|
"last": "Shindo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideaki", |
|
"middle": [], |
|
"last": "Takeda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshiyasu", |
|
"middle": [], |
|
"last": "Takefuji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "TACL", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "397--411", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ikuya Yamada, Hiroyuki Shindo, Hideaki Takeda, and Yoshiyasu Takefuji. 2017a. Learning distributed rep- resentations of texts and entities from knowledge base. TACL 5:397-411.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Learning distributed representations of texts and entities from knowledge base", |
|
"authors": [ |
|
{ |
|
"first": "Ikuya", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroyuki", |
|
"middle": [], |
|
"last": "Shindo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideaki", |
|
"middle": [], |
|
"last": "Takeda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshiyasu", |
|
"middle": [], |
|
"last": "Takefuji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ikuya Yamada, Hiroyuki Shindo, Hideaki Takeda, and Yoshiyasu Takefuji. 2017b. Learning distributed rep- resentations of texts and entities from knowledge base. CoRR abs/1705.02494.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Learning discriminative projections for text similarity measures", |
|
"authors": [ |
|
{ |

"first": "Wen-tau", |

"middle": [], |

"last": "Yih", |

"suffix": "" |

}, |

{ |

"first": "Kristina", |

"middle": [], |

"last": "Toutanova", |

"suffix": "" |

}, |

{ |

"first": "John", |

"middle": [ |

"C" |

], |

"last": "Platt", |

"suffix": "" |

}, |

{ |

"first": "Christopher", |

"middle": [], |

"last": "Meek", |

"suffix": "" |

} |
|
], |
|
"year": 2011, |
|
"venue": "CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "247--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wen-tau Yih, Kristina Toutanova, John C Platt, and Christopher Meek. 2011. Learning discriminative projections for text similarity measures. In CoNLL. pages 247-256.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Generative discovery of relational medical entity pairs", |
|
"authors": [ |
|
{ |
|
"first": "Chenwei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaliang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenwei Zhang, Yaliang Li, Nan Du, Wei Fan, and Philip S. Yu. 2018. Generative discovery of relational medical entity pairs.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Architecture of the dual encoder model for retrieval (a). Common component architectures are shown for (b) text input, (c) sparse ID input, and (d) compound input joining multiple encoder outputs. Note that all text encoders share a common set of embeddings.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Recall@1 improvement for successive iterations of hard negative mining for Wikinews (solid) and TACKBP-2010 (dashed).", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "", |
|
"content": "<table><tr><td>provides a comparison against the most rele-</td></tr><tr><td>vant related work. While there are some reported im-</td></tr><tr><td>provements due to collective (global) resolution of all</td></tr><tr><td>mentions in a document (Globerson et al. (2016) report</td></tr><tr><td>87.2% and Nie et al.</td></tr></table>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"text": "Table 2provides the percent of mentions for which the correct entity is found in the top 100 retrieved results, using the different baselines", |
|
"content": "<table><tr><td colspan=\"3\">System TACKBP-2010 Wikinews</td></tr><tr><td>AT-Prior</td><td>89.5</td><td>93.8</td></tr><tr><td>AT-Ext</td><td>91.7</td><td>94.0</td></tr><tr><td>BM25</td><td>68.9</td><td>83.2</td></tr><tr><td>DEER</td><td>96.3</td><td>97.9</td></tr><tr><td colspan=\"3\">Table 2: Retrieval evaluation comparison for TACKBP-</td></tr><tr><td colspan=\"2\">2010 and Wikinews using Recall@100.</td><td/></tr><tr><td colspan=\"3\">and the DEER model. The learned representations de-</td></tr><tr><td colspan=\"3\">liver superior performance and do not require special</td></tr><tr><td colspan=\"3\">handling for unigrams versus bigram lookups, counts</td></tr><tr><td colspan=\"2\">for entity prominence, and so on.</td><td/></tr></table>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"text": "Nearest neighbors retrieved by DEER for a sample of entities.", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"html": null, |
|
"text": "Examples of test mentions that require making use of context, where the alias table does not retrieve the correct entity. We show the top entities returned by both systems, with the correct entity in bold.", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |