|
{ |
|
"paper_id": "Q14-1019", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:11:09.185064Z" |
|
}, |
|
"title": "Entity Linking meets Word Sense Disambiguation: a Unified Approach", |
|
"authors": [ |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Moro", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sapienza Universit\u00e0 di Roma", |
|
"location": { |
|
"addrLine": "Viale Regina Elena 295", |
|
"postCode": "00161", |
|
"settlement": "Roma", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Raganato", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sapienza Universit\u00e0 di Roma", |
|
"location": { |
|
"addrLine": "Viale Regina Elena 295", |
|
"postCode": "00161", |
|
"settlement": "Roma", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Sapienza Universit\u00e0 di Roma", |
|
"location": { |
|
"addrLine": "Viale Regina Elena 295", |
|
"postCode": "00161", |
|
"settlement": "Roma", |
|
"country": "Italy" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Entity Linking (EL) and Word Sense Disambiguation (WSD) both address the lexical ambiguity of language. But while the two tasks are pretty similar, they differ in a fundamental respect: in EL the textual mention can be linked to a named entity which may or may not contain the exact mention, while in WSD there is a perfect match between the word form (better, its lemma) and a suitable word sense. In this paper we present Babelfy, a unified graph-based approach to EL and WSD based on a loose identification of candidate meanings coupled with a densest subgraph heuristic which selects high-coherence semantic interpretations. Our experiments show state-ofthe-art performances on both tasks on 6 different datasets, including a multilingual setting.", |
|
"pdf_parse": { |
|
"paper_id": "Q14-1019", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Entity Linking (EL) and Word Sense Disambiguation (WSD) both address the lexical ambiguity of language. But while the two tasks are pretty similar, they differ in a fundamental respect: in EL the textual mention can be linked to a named entity which may or may not contain the exact mention, while in WSD there is a perfect match between the word form (better, its lemma) and a suitable word sense. In this paper we present Babelfy, a unified graph-based approach to EL and WSD based on a loose identification of candidate meanings coupled with a densest subgraph heuristic which selects high-coherence semantic interpretations. Our experiments show state-ofthe-art performances on both tasks on 6 different datasets, including a multilingual setting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The automatic understanding of the meaning of text has been a major goal of research in computational linguistics and related areas for several decades, with ambitious challenges, such as Machine Reading (Etzioni et al., 2006) and the quest for knowledge (Schubert, 2006) . Word Sense Disambiguation (WSD) (Navigli, 2009; Navigli, 2012) is a historical task aimed at assigning meanings to single-word and multi-word occurrences within text, a task which is more alive than ever in the research community.", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 226, |
|
"text": "(Etzioni et al., 2006)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 271, |
|
"text": "(Schubert, 2006)", |
|
"ref_id": "BIBREF64" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 321, |
|
"text": "(Navigli, 2009;", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 336, |
|
"text": "Navigli, 2012)", |
|
"ref_id": "BIBREF56" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, the collaborative creation of large semistructured resources, such as Wikipedia, and knowledge resources built from them (Hovy et al., 2013) , such as BabelNet (Navigli and Ponzetto, 2012a) , DBpedia (Auer et al., 2007) and YAGO2 (Hoffart et al., 2013) , has favoured the emergence of new tasks, such as Entity Linking (EL) (Rao et al., 2013) , and opened up new possibilities for tasks such as Named Entity Disambiguation (NED) and Wikification. The aim of EL is to discover mentions of entities within a text and to link them to the most suitable entry in a reference knowledge base. However, in contrast to WSD, a mention may be partial while still being unambiguous thanks to the context. For instance, consider the following sentence:", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 150, |
|
"text": "(Hovy et al., 2013)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 199, |
|
"text": "(Navigli and Ponzetto, 2012a)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 229, |
|
"text": "(Auer et al., 2007)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 262, |
|
"text": "(Hoffart et al., 2013)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 352, |
|
"text": "(Rao et al., 2013)", |
|
"ref_id": "BIBREF62" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(1) Thomas and Mario are strikers playing in Munich.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This example makes it clear how intertwined the two tasks of WSD and EL are. In fact, on the one hand, striker and play are polysemous words which can be disambiguated by selecting the game/soccer playing senses of the two words in a dictionary; on the other hand, Thomas and Mario are partial mentions which have to be linked to the appropriate entries of a knowledge base, that is, Thomas M\u00fcller and Mario Gomez, two well-known soccer players.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The two main differences between WSD and EL lie, on the one hand, in the kind of inventory used, i.e., dictionary vs. encyclopedia, and, on the other hand, in the assumption that the mention is complete or potentially partial. Notwithstanding these differences, the tasks are similar in nature, in that they both involve the disambiguation of textual fragments according to a reference inventory. However, the research community has so far tackled the two tasks separately, often duplicating efforts and solutions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In contrast to this trend, research in knowledge acquisition is now heading towards the seamless in-tegration of encyclopedic and lexicographic knowledge into structured language resources (Hovy et al., 2013) , and the main representative of this new direction is undoubtedly BabelNet (Navigli and Ponzetto, 2012a) . Given such structured language resources it seems natural to suppose that they might provide a common ground for the two tasks of WSD and EL.", |
|
"cite_spans": [ |
|
{ |
|
"start": 189, |
|
"end": 208, |
|
"text": "(Hovy et al., 2013)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 285, |
|
"end": 314, |
|
"text": "(Navigli and Ponzetto, 2012a)", |
|
"ref_id": "BIBREF50" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "More precisely, in this paper we explore the hypothesis that the lexicographic knowledge used in WSD is also useful for tackling the EL task, and, vice versa, that the encyclopedic information utilized in EL helps disambiguate nominal mentions in a WSD setting. We propose Babelfy, a novel, unified graph-based approach to WSD and EL, which performs two main steps: i) it exploits random walks with restart, and triangles as a support for reweighting the edges of a large semantic network; ii) it uses a densest subgraph heuristic on the available semantic interpretations of the input text to perform a joint disambiguation with both concepts and named entities. Our experiments show the benefits of our synergistic approach on six gold-standard datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Word Sense Disambiguation (WSD) is the task of choosing the right sense for a word within a given context. Typical approaches for this task can be classified as i) supervised, ii) knowledge-based, and iii) unsupervised. However, supervised approaches require huge amounts of annotated data (Zhong and Ng, 2010; Shen et al., 2013; Pilehvar and Navigli, 2014) , an effort which cannot easily be repeated for new domains and languages, while unsupervised ones suffer from data sparsity and an intrinsic difficulty in their evaluation (Agirre et al., 2006; Brody and Lapata, 2009; Manandhar et al., 2010; Van de Cruys and Apidianaki, 2011; Di Marco and Navigli, 2013 ). On the other hand, knowledge-based approaches are able to obtain good performance using readily-available structured knowledge (Agirre et al., 2010; Guo and Diab, 2010; Ponzetto and Navigli, 2010; Miller et al., 2012; Agirre et al., 2014) . Some of these approaches marginally take into account the structural properties of the knowledge base (Mihalcea, 2005) . Other approaches, instead, leverage the structural properties of the knowledge base by exploiting centrality and connectivity measures (Sinha and Mihalcea, 2007; Tsatsaronis et al., 2007; Agirre and Soroa, 2009; Navigli and Lapata, 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 310, |
|
"text": "(Zhong and Ng, 2010;", |
|
"ref_id": "BIBREF76" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 329, |
|
"text": "Shen et al., 2013;", |
|
"ref_id": "BIBREF66" |
|
}, |
|
{ |
|
"start": 330, |
|
"end": 357, |
|
"text": "Pilehvar and Navigli, 2014)", |
|
"ref_id": "BIBREF58" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 552, |
|
"text": "(Agirre et al., 2006;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 553, |
|
"end": 576, |
|
"text": "Brody and Lapata, 2009;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 577, |
|
"end": 600, |
|
"text": "Manandhar et al., 2010;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 635, |
|
"text": "Van de Cruys and Apidianaki, 2011;", |
|
"ref_id": "BIBREF73" |
|
}, |
|
{ |
|
"start": 636, |
|
"end": 662, |
|
"text": "Di Marco and Navigli, 2013", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 793, |
|
"end": 814, |
|
"text": "(Agirre et al., 2010;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 815, |
|
"end": 834, |
|
"text": "Guo and Diab, 2010;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 835, |
|
"end": 862, |
|
"text": "Ponzetto and Navigli, 2010;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 863, |
|
"end": 883, |
|
"text": "Miller et al., 2012;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 884, |
|
"end": 904, |
|
"text": "Agirre et al., 2014)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1009, |
|
"end": 1025, |
|
"text": "(Mihalcea, 2005)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 1163, |
|
"end": 1189, |
|
"text": "(Sinha and Mihalcea, 2007;", |
|
"ref_id": "BIBREF67" |
|
}, |
|
{ |
|
"start": 1190, |
|
"end": 1215, |
|
"text": "Tsatsaronis et al., 2007;", |
|
"ref_id": "BIBREF72" |
|
}, |
|
{ |
|
"start": 1216, |
|
"end": 1239, |
|
"text": "Agirre and Soroa, 2009;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1240, |
|
"end": 1265, |
|
"text": "Navigli and Lapata, 2010)", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Sense Disambiguation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "One of the key steps of many knowledge-based WSD algorithms is the creation of a graph representing the semantic interpretations of the input text. Two main strategies to build this graph have been proposed: i) exploiting the direct connections, i.e., edges, between the considered sense candidates; ii) populating the graph according to (shortest) paths between them. In our approach we manage to unify these two strategies by automatically creating edges between sense candidates performing Random Walk with Restart (Tong et al., 2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 518, |
|
"end": 537, |
|
"text": "(Tong et al., 2006)", |
|
"ref_id": "BIBREF70" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Sense Disambiguation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The recent upsurge of interest in multilinguality has led to the development of cross-lingual and multilingual approaches to WSD (Lefever and Hoste, 2010; Lefever and Hoste, 2013; . Multilinguality has been exploited in different ways, e.g., by using parallel corpora to build multilingual contexts (Guo and Diab, 2010; Banea and Mihalcea, 2011; Lefever et al., 2011) or by means of ensemble methods which exploit complementary sense evidence from translations in different languages (Navigli and Ponzetto, 2012b) . In this work, we present a novel exploitation of the structural properties of a multilingual semantic network.", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 154, |
|
"text": "(Lefever and Hoste, 2010;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 179, |
|
"text": "Lefever and Hoste, 2013;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 319, |
|
"text": "(Guo and Diab, 2010;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 345, |
|
"text": "Banea and Mihalcea, 2011;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 367, |
|
"text": "Lefever et al., 2011)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 513, |
|
"text": "(Navigli and Ponzetto, 2012b)", |
|
"ref_id": "BIBREF51" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Sense Disambiguation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Entity Linking (Erbs et al., 2011; Rao et al., 2013; Cornolti et al., 2013) encompasses a set of similar tasks, which include Named Entity Disambiguation (NED), that is the task of linking entity mentions in a text to a knowledge base (Bunescu and Pasca, 2006; Cucerzan, 2007) , and Wikification, i.e., the automatic annotation of text by linking its relevant fragments of text to the appropriate Wikipedia articles. Mihalcea and Csomai (2007) were the first to tackle the Wikification task. In their approach they disambiguate each word in a sentence independently by exploiting the context in which it occurs. However, this approach is local in that it lacks a collective notion of coherence between the selected Wikipedia pages. To overcome this problem, Cucerzan (2007) introduced a global approach based on the simultaneous disambiguation of all the terms in a text and the use of lexical context to disambiguate the mentions. To maximize the semantic agreement Milne and Witten (2008) introduced the analysis of the semantic relations between the candidate senses and the unambiguous context, i.e., words with a single sense candidate. However, the performance of this algorithm depends heavily on the number of links incident to the target senses and on the availability of unambiguous words within the input text. To overcome this issue a novel class of approaches have been proposed (Kulkarni et al., 2009; Ratinov et al., 2011; Hoffart et al., 2011) that exploit global and local features. However, these systems either rely on a difficult NP-hard formalization of the problem which is infeasible for long text, or exploit popularity measures which are domain-dependent. In contrast, we show that the semantic network structure can be leveraged to obtain state-of-the-art performance by synergistically disambiguating both word senses and named entities at the same time.", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 34, |
|
"text": "(Erbs et al., 2011;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 35, |
|
"end": 52, |
|
"text": "Rao et al., 2013;", |
|
"ref_id": "BIBREF62" |
|
}, |
|
{ |
|
"start": 53, |
|
"end": 75, |
|
"text": "Cornolti et al., 2013)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 260, |
|
"text": "(Bunescu and Pasca, 2006;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 276, |
|
"text": "Cucerzan, 2007)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 417, |
|
"end": 443, |
|
"text": "Mihalcea and Csomai (2007)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 758, |
|
"end": 773, |
|
"text": "Cucerzan (2007)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 967, |
|
"end": 990, |
|
"text": "Milne and Witten (2008)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 1392, |
|
"end": 1415, |
|
"text": "(Kulkarni et al., 2009;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1416, |
|
"end": 1437, |
|
"text": "Ratinov et al., 2011;", |
|
"ref_id": "BIBREF63" |
|
}, |
|
{ |
|
"start": 1438, |
|
"end": 1459, |
|
"text": "Hoffart et al., 2011)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity Linking", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Recently, the explosion of on-line social networking services, such as Twitter and Facebook, have contributed to the development of new methods for the efficient disambiguation of short texts (Ferragina and Scaiella, 2010; Hoffart et al., 2012; B\u00f6hm et al., 2012) . Thanks to a loose candidate identification technique coupled with a densest subgraph heuristic, we show that our approach is particularly suited for short and highly ambiguous text disambiguation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 222, |
|
"text": "(Ferragina and Scaiella, 2010;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 244, |
|
"text": "Hoffart et al., 2012;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 245, |
|
"end": 263, |
|
"text": "B\u00f6hm et al., 2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Entity Linking", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Our main goal is to bring together the two worlds of WSD and EL. On the one hand, this implies relaxing the constraint of a perfect association between mentions and meanings, which is, instead, assumed in WSD. On the other hand, this relaxation leads to the inherent difficulty of encoding a full-fledged sense inventory for EL. Our solution to this problem is to keep the set of candidate meanings for a given mention as open as possible (see Section 6), so as to enable high recall in linking partial mentions, while providing an effective method for handling this high ambiguity (see Section 7).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Best of Two Worlds", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "A key assumption of our work is that the lexicographic knowledge used in WSD is also useful for tackling the EL task, and vice versa the encyclopedic information utilized in EL helps disambiguate nominal mentions in a WSD setting. We enable the joint treatment of concepts and named entities by enforcing high coherence in our semantic interpretations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Best of Two Worlds", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Task. Our task is to disambiguate and link all nominal and named entity mentions occurring within a text. The linking task is performed by associating each mention with the most suitable entry of a given knowledge base. 1 We point out that our definition is unconstrained in terms of what to link, i.e., unlike Wikification and WSD, we can link overlapping fragments of text. For instance, given the text fragment Major League Soccer, we identify and disambiguate several different nominal and entity mentions: Major League Soccer, major league, league and soccer. In contrast to EL, we link not only named entity mentions, such as Major League Soccer, but also nominal mentions, e.g., major league, to their corresponding meanings in the knowledge base.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 221, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WSD and Entity Linking Together", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Babelfy. We provide a unified approach to WSD and entity linking in three steps:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WSD and Entity Linking Together", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "1. Given a lexicalized semantic network, we associate with each vertex, i.e., either concept or named entity, a semantic signature, that is, a set of related vertices (Section 5). This is a preliminary step which needs to be performed only once, independently of the input text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WSD and Entity Linking Together", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "2. Given a text, we extract all the linkable fragments from this text and, for each of them, list the possible meanings according to the semantic network (Section 6).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WSD and Entity Linking Together", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "3. We create a graph-based semantic interpretation of the whole text by linking the candidate meanings of the extracted fragments using the previously-computed semantic signatures. We then extract a dense subgraph of this representation and select the best candidate meaning for each fragment (Section 7).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "WSD and Entity Linking Together", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our approach requires the availability of a widecoverage semantic network which encodes structural and lexical information both of an encyclopedic and of a lexicographic kind. Although in principle any semantic network with these properties could be utilized, in our work we used the Babel-Net 2 1.1.1 semantic network (Navigli and Ponzetto, 2012a) since it is the largest multilingual knowledge base, obtained from the automatic seamless integration of Wikipedia 3 and WordNet (Fellbaum, 1998) . We consider BabelNet as a directed multigraph which contains both concepts and named entities as its vertices and a multiset of semantic relations as its edges. We leverage the multilingual lexicalizations of the vertices of BabelNet to identify mentions in the input text. For example, the entity FC Bayern Munich can be lexicalized in different languages, e.g., F.C. Bayern de M\u00fanich in Spanish, Die Roten in English and Bayern M\u00fcnchen in German, among others. As regards semantic relations, the only information we use is that of the end points, i.e., vertices, that these relations connect, while neglecting the relation type.", |
|
"cite_spans": [ |
|
{ |
|
"start": 319, |
|
"end": 348, |
|
"text": "(Navigli and Ponzetto, 2012a)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 494, |
|
"text": "(Fellbaum, 1998)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Network", |
|
"sec_num": "4" |
|
}, |
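{

"text": "Since relation types are ignored, the network reduces to a directed graph over synset identifiers together with per-language lexicalizations. The following minimal Python sketch fixes the representation assumed by the code fragments in the next sections; it is our own illustrative data model, not BabelNet's API.\n\nfrom collections import defaultdict\n\n# succ[v]: set of successors of vertex v; concepts and named entities\n# are both plain vertex identifiers, and edge types are dropped\nsucc = defaultdict(set)\n\n# lex[v][lang]: lexicalizations of v in a given language, e.g.\n# lex['FC_Bayern_Munich']['DE'] might contain 'Bayern M\u00fcnchen'\nlex = defaultdict(lambda: defaultdict(set))\n\ndef add_edge(v, u):\n    # untyped directed semantic relation from v to u\n    succ[v].add(u)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Semantic Network",

"sec_num": "4"

},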
|
{ |
|
"text": "One of the major issues affecting both manuallycurated and automatically constructed semantic networks is data sparsity. For instance, we calculated that the average number of incident edges is roughly 10 in WordNet, 50 in BabelNet and 80 in YAGO2, to mention a few. Although automatically-built resources typically provide larger amounts of edges, two issues have to be taken into account: concepts which should be related might not be directly connected despite being structurally close within the network, and, vice versa, weakly-related or even unrelated concepts can be erroneously connected by an edge. For instance, in BabelNet we do not have an edge between playmaker and Thomas M\u00fcller, while we have an incorrect edge connecting FC Bayern Munich and Yellow Submarine (song). However, this crisp notion of relatedness can be overcome by exploiting the global structure of the semantic network, thereby obtaining a more precise and highercoverage measure of relatedness. We address this issue in two steps: first, we provide a structural weighting of the network's edges; second, for each vertex we create a set of related vertices using random walks with restart.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Semantic Signatures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Structural weighting. Our first objective is to assign higher weights to edges which are involved in more densely connected areas of the directed network. To this end, inspired by the local clustering coefficient measure (Watts and Strogatz, 1998) and its recent success in Word Sense Induction (Di Marco and Navigli, 2013), we use directed triangles, i.e., directed cycles of length 3, and weight each edge (v, v ) by the number of directed triangles it occurs in:", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 247, |
|
"text": "(Watts and Strogatz, 1998)", |
|
"ref_id": "BIBREF75" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Semantic Signatures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "weight(v, v ) := |{(v, v , v ) : (v, v ),", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Building Semantic Signatures", |
|
"sec_num": "5" |
|
}, |
|
|
{ |
|
"text": "We add one to each weight to ensure the highest degree of reachability in the network.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Semantic Signatures", |
|
"sec_num": "5" |
|
}, |
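{

"text": "A minimal sketch of this weighting under the representation above. The naive enumeration of directed 3-cycles shown here is our own, for illustration; the authors do not specify their implementation.\n\ndef triangle_weights(succ):\n    # weight(v, v') := |{directed triangles through (v, v')}| + 1\n    weights = {}\n    for v, outs in succ.items():\n        for v1 in outs:\n            # count directed 3-cycles v -> v1 -> v2 -> v\n            n = sum(1 for v2 in succ.get(v1, ()) if v in succ.get(v2, ()))\n            weights[(v, v1)] = n + 1  # +1 preserves reachability\n    return weights",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Building Semantic Signatures",

"sec_num": "5"

},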
|
{ |
|
"text": "Random Walk with Restart. Our goal is to create a semantic signature (i.e., a set of highly related vertices) for each concept and named entity of the semantic network. To do this, we perform a Random Walk with Restart (RWR) (Tong et al., 2006) , that is, a stochastic process that starts from an initial vertex of the graph 4 and then, for a fixed number n of steps or until convergence, explores the graph by choosing the next vertex within the current neighborhood or by restarting from the initial vertex with a given, fixed restart probability \u03b1. For each edge (v, v ) in the network, we model the conditional probability P (v |v) as the normalized weight of the edge:", |
|
"cite_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 244, |
|
"text": "(Tong et al., 2006)", |
|
"ref_id": "BIBREF70" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Semantic Signatures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "P (v |v) = weight(v, v ) v \u2208V weight(v, v )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Semantic Signatures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "where V is the set of vertices of the semantic network and weight(v, v ) is the function defined in Equation 1. We then run the RWR from each vertex v of the semantic network for a fixed number n of steps (we show in Algorithm 1 our RWR pseudocode). We keep track of the encountered vertices using the map counts, i.e., we increase the counter associated with vertex v in counts every time we hit v during a RWR started from v (see line 11). As a result, we obtain a frequency distribution over the whole set of concepts and entities. To eliminate weakly-related vertices we keep only those items that were hit at least \u03b7 times (see lines 16-18). Finally, we save the remaining vertices in the set semSign v which is the semantic signature of v (see line 19).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Semantic Signatures", |
|
"sec_num": "5" |
|
}, |
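{

"text": "Row-normalizing these weights yields the transition distribution used by the random walk; the sketch below is a direct transcription of the formula above (function and variable names are ours).\n\ndef transition_probs(weights, succ):\n    # P(v'|v) = weight(v, v') / sum over v'' of weight(v, v'')\n    P = {}\n    for v, outs in succ.items():\n        total = sum(weights[(v, u)] for u in outs)\n        P[v] = [(u, weights[(v, u)] / total) for u in outs]\n    return P",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Building Semantic Signatures",

"sec_num": "5"

},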
|
{

"text": "Algorithm 1 Random walk with restart.\n1: input: v, the starting vertex; \u03b1, the restart probability; n, the number of steps to be executed; P, the transition probabilities; \u03b7, the frequency threshold.\n2: output: semSign_v, set of related vertices for v.\n3: function RWR(v, \u03b1, n, P, \u03b7)\n4:   v' := v\n...\n19: return semSign_v = counts.keys()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Building Semantic Signatures",

"sec_num": "5"

},
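{

"text": "Only the header and the final line of the listing survive in this parse, so the following Python sketch reconstructs the behaviour described in the text (n steps, restart probability \u03b1, a counts map, frequency threshold \u03b7); the exact bookkeeping of the missing lines 5-18 is our assumption.\n\nimport random\nfrom collections import Counter\n\ndef rwr(v, alpha, n, P, eta):\n    # P[u]: list of (successor, probability) pairs, as built above\n    counts = Counter()\n    cur = v\n    for _ in range(n):\n        if random.random() < alpha or not P.get(cur):\n            cur = v  # restart from the initial vertex\n        else:\n            succs, probs = zip(*P[cur])\n            cur = random.choices(succs, weights=probs)[0]\n        counts[cur] += 1  # record every hit (cf. line 11)\n    # keep only vertices hit at least eta times (cf. lines 16-18)\n    return {u for u, c in counts.items() if c >= eta}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Building Semantic Signatures",

"sec_num": "5"

},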
|
{ |
|
"text": "The creation of our set of semantic signatures, one for each vertex in the semantic network, is a preliminary step carried out once only before starting processing any input text. We now turn to the candidate identification and disambiguation steps.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Semantic Signatures", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Given a text as input, we apply part-of-speech tagging and identify the set F of all the textual fragments, i.e., all the sequences of words of maximum length five, which contain at least one noun and that are substrings of lexicalizations in BabelNet, i.e., those fragments that can potentially be linked to an entry in BabelNet. For each textual fragment f \u2208 F , i.e., a single-or multi-word expression of the input text, we look up the semantic network for candidate meanings, i.e., vertices that contain f or, only for named entities, a superstring of f as their lexicalization. For instance, for sentence (1) in the introduction, we identify the following textual fragments: Thomas, Mario, strikers, Munich. This output is obtained thanks to our loose candidate identification routine, i.e., based on superstring matching instead of exact matching, which, for instance, enables us to recognize the right candidate Mario Gomez for the mention Mario even if this named entity does not have Mario as one of its lexicalizations (for an analysis of the impact of this routine against the exact matching approach see the discussion in Section 9).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Identification", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Moreover, as we stated in Section 3, we allow overlapping fragments, e.g., for major league we recognize league and major league. We denote with cand(f ) the set of all the candidate meanings of fragment f . For instance, for the noun league we have that cand(league) contains among others the sport word sense and the TV series named entity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Identification", |
|
"sec_num": "6" |
|
}, |
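{

"text": "A simplified sketch of the extraction step, assuming POS-tagged input and a set of lowercased BabelNet lexicalizations; the substring test below subsumes both exact matches and, for named entities, superstring matching, and all names are illustrative.\n\ndef extract_fragments(tagged, lexicalizations, max_len=5):\n    # tagged: list of (word, pos) pairs for the input text\n    fragments = []\n    for i in range(len(tagged)):\n        for j in range(i + 1, min(i + max_len, len(tagged)) + 1):\n            words = tagged[i:j]\n            if not any(pos.startswith('N') for _, pos in words):\n                continue  # a fragment must contain at least one noun\n            text = ' '.join(w for w, _ in words)\n            # keep overlapping fragments that are substrings of some\n            # lexicalization, e.g. both 'major league' and 'league'\n            if any(text.lower() in lx for lx in lexicalizations):\n                fragments.append((i, j, text))\n    return fragments",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Candidate Identification",

"sec_num": "6"

},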
|
{ |
|
"text": "Semantic interpretation graph. After the identification of fragments (F ) and their candidate meanings (cand(\u2022)), we create a directed graph G I = (V I , E I ) of the semantic interpretations of the input text. We show the pseudocode in Algorithm 2. V I contains all the candidate meanings of all fragments, that is,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "V I := {(v, f ) : v \u2208 cand(f ), f \u2208 F },", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "where f is a fragment of the input text and v is a candidate Babel synset that has a lexicalization which is equal to or is a superstring of f (see lines 4-8). The set of edges E I connects related meanings and is populated as follows: we add an edge from (v, f ) to (v , f ) if and only if f = f and v \u2208 semSign v (see lines 9-11). In other words, we connect two candidate meanings of different fragments if one is in the semantic signature of the other. For instance, we add an edge between (Mario Gomez, Mario) and (Thomas M\u00fcller, Thomas), while we do not add one between (Mario Gomez, Mario) and (Mario Basler, Mario) since these are two candidate meanings of the same fragment, i.e., Mario. In Figure 1 , we show an excerpt of our graph for sentence (1).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 699, |
|
"end": 707, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
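{

"text": "Lines 4-11 of Algorithm 2 then amount to two set comprehensions; a sketch assuming cand maps each fragment to its candidate synsets and sem_sign holds the semantic signatures of Section 5 (both names are ours).\n\ndef build_interpretation_graph(fragments, cand, sem_sign):\n    # one vertex per (candidate meaning, fragment) pair\n    V = {(v, f) for f in fragments for v in cand[f]}\n    # connect candidates of different fragments whenever one meaning\n    # appears in the semantic signature of the other\n    E = {((v, f), (v2, f2))\n         for (v, f) in V for (v2, f2) in V\n         if f != f2 and v2 in sem_sign.get(v, set())}\n    return V, E",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Candidate Disambiguation",

"sec_num": "7"

},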
|
{ |
|
"text": "At this point we have a graph-based representation of all the possible interpretations of the input text. In order to drastically reduce the degree of ambiguity while keeping the interpretation coherence as high as possible, we apply a novel densest subgraph heuristic (see line 12), whose description we defer to the next paragraph. The result is a subgraph which contains those semantic interpretations that are most coherent to each other. However, this subgraph might still contain multiple interpretations for the same fragment, and even unambiguous fragments which are not correct. Therefore, the final (Tom\u00e1s Mili\u00e1n, Thomas) step is the selection of the most suitable candidate meaning for each fragment f given a threshold \u03b8 to discard semantically unrelated candidate meanings. We score each meaning v \u2208 cand(f ) with its normalized weighted degree 5 in the densest subgraph:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "(Thomas M\u00fcller, Thomas) (forward, striker) (striker, striker) (FC Bayern Munich, Munich) (Munich, Munich) (Mario Adorf, Mario) (Mario Basler, Mario) (Mario Gomez, Mario)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "score((v, f )) = w (v,f ) \u2022 deg((v, f )) v \u2208 cand(f ) w (v ,f ) \u2022 deg((v , f ))", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "where w (v,f ) is the fraction of fragments the candidate meaning v connects to:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "w (v,f ) := |{f \u2208 F : \u2203v s.t. ((v, f ), (v , f )) or ((v , f ), (v, f )) \u2208 E I }| |F | \u2212 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The rationale behind this scoring function is to take into account both the semantic coherence, using a graph centrality measure among the candidate meanings, and the lexical coherence, in terms of the number of fragments a candidate relates to.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
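{

"text": "Equation 2 and the definition of w_{(v,f)} translate directly into code; a sketch in which edges are the ((v, f), (v', f')) pairs built above and n_fragments = |F| (helper names are our own).\n\ndef score(v, f, cand, E, n_fragments):\n    def deg(u):\n        # incoming plus outgoing edges of (u, f), cf. footnote 5\n        return sum(1 for (a, b) in E if a == (u, f) or b == (u, f))\n\n    def w(u):\n        # fraction of other fragments that (u, f) connects to\n        linked = ({b[1] for (a, b) in E if a == (u, f)}\n                  | {a[1] for (a, b) in E if b == (u, f)})\n        return len(linked) / (n_fragments - 1)\n\n    den = sum(w(u) * deg(u) for u in cand[f])\n    return (w(v) * deg(v)) / den if den else 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Candidate Disambiguation",

"sec_num": "7"

},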
|
{ |
|
"text": "Finally, we link each f to the highest ranking candidate meaning v if score((v , f )) \u2265 \u03b8, where \u03b8 is a fixed threshold (see lines 14-18 of Algorithm 2). For instance, in sentence (1) and for the fragment Mario we select Mario Gomez as our final candidate meaning and link it to the fragment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Linking by densest subgraph. We now illustrate our novel densest subgraph heuristic, used in line 12 of Algorithm 2, for reducing the level of ambiguity of the initial semantic interpretation graph G I . The main idea here is that the most suitable meanings of each text fragment will belong to the densest area of the graph. For instance, in Figure 1 the (candidate, fragment) pairs (Thomas M\u00fcller, Thomas), (Mario Gomez, Mario), (striker, striker) and (FC Bayern 5 We denote with deg(v) the overall number of incoming and outgoing edges, i.e., deg(", |
|
"cite_spans": [ |
|
{ |
|
"start": 465, |
|
"end": 466, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 343, |
|
"end": 351, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "v) := deg + (v) + deg \u2212 (v).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{

"text": "Algorithm 2 Candidate Disambiguation.\n1: input: F, the fragments in the input text; semSign, the semantic signatures; \u00b5, ambiguity level to be reached; cand, fragments to candidate meanings.\n2: output: selected, disambiguated fragments.\n3: E_I := \u2205\n4: V_I := \u2205\n5: G_I := (V_I, E_I)\n6: for each fragment f \u2208 F do\n7:   for each candidate v \u2208 cand(f) do\n8:     V_I := V_I \u222a {(v, f)}\n9: for each ((v, f), (v', f')) \u2208 V_I \u00d7 V_I do\n10:   if f \u2260 f' and v' \u2208 semSign_v then\n11:     E_I := E_I \u222a {((v, f), (v', f'))}\n12: G_I := DENSSUB(F, cand, G_I, \u00b5)\n13: selected := new Map<String, Synset>\n14: for each f \u2208 F s.t. \u2203(v, f) \u2208 V_I do\n15:   cand'(f) := {v : (v, f) \u2208 V_I}\n16:   v' := arg max_{v \u2208 cand'(f)} score((v, f))\n17:   if score((v', f)) \u2265 \u03b8 then\n18:     selected(f) := v'\n19: return selected",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Candidate Disambiguation",

"sec_num": "7"

},
|
|
{ |
|
"text": "The problem of identifying the densest subgraph of size at least k is NP-hard (Feige et al., 1999) . Therefore, we define a heuristic for k-partite graphs inspired by a 2-approximation greedy algorithm for arbitrary graphs (Charikar, 2000; Khuller and Saha, 2009) . Our adapted strategy for selecting a dense subgraph of G I is based on the iterative removal of low-coherence vertices, i.e., fragment interpretations. We show the pseudocode in Algorithm 3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 98, |
|
"text": "(Feige et al., 1999)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 239, |
|
"text": "(Charikar, 2000;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 263, |
|
"text": "Khuller and Saha, 2009)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Candidate Disambiguation", |
|
"sec_num": "7" |
|
}, |
|
{

"text": "We start with the initial graph G^{(0)}_I at step t = 0 (see line 5). For each step t (lines 7-16), first, we identify the most ambiguous fragment f_max, i.e., the one with the maximum number of candidate meanings in the graph (see line 7). Next, we discard the weakest interpretation of the current fragment f_max. To do so, we determine the lexical and semantic coherence of each candidate meaning (v, f_max) using Formula 2 (see line 10). We then remove from our graph G^{(t)}_I the lowest-coherence vertex (v_min, f_max), i.e., the one whose score is minimum (see lines 11-13). For instance, in Figure 1, f_max is the fragment Mario and we have: score((Mario Gomez, Mario)) \u221d 3/3 \u2022 5 = 5, score((Mario Basler, Mario)) \u221d 1/3 \u2022 1 \u2248 0.3 and score((Mario Adorf, Mario)) \u221d 2/3 \u2022 2 \u2248 1.3, so we remove (Mario Basler, Mario) from the graph since its score is minimum.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Candidate Disambiguation",

"sec_num": "7"

},

{

"text": "Algorithm 3 Densest Subgraph.\n1: input: F, the set of all fragments in the input text; cand, from fragments to candidate meanings; G^{(0)}_I, the full semantic interpretation graph; \u00b5, ambiguity level to be reached.\n2: output: G*_I, a dense subgraph.\n3: function DENSSUB(F, cand, G^{(0)}_I, \u00b5)\n4:   t := 0\n5:   G*_I := G^{(0)}_I\n6:   while true do\n7:     f_max := arg max_{f \u2208 F} |{v : \u2203(v, f) \u2208 V^{(t)}_I}|\n8:     if |{v : \u2203(v, f_max) \u2208 V^{(t)}_I}| \u2264 \u00b5 then\n9:       break\n10:    v_min := arg min_{v \u2208 cand(f_max)} score((v, f_max))\n11:    V^{(t+1)}_I := V^{(t)}_I \\\\ {(v_min, f_max)}\n12:    E^{(t+1)}_I := E^{(t)}_I \u2229 (V^{(t+1)}_I \u00d7 V^{(t+1)}_I)\n13:    G^{(t+1)}_I := (V^{(t+1)}_I, E^{(t+1)}_I)\n14:    if avgdeg(G^{(t+1)}_I) > avgdeg(G*_I) then\n15:      G*_I := G^{(t+1)}_I\n16:    t := t + 1\n17: return G*_I",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Candidate Disambiguation",

"sec_num": "7"

},

{

"text": "We then move to the next step, i.e., we set t := t + 1 (see line 16) and repeat the low-coherence removal step. We stop when the number of remaining candidates for each fragment is below a threshold \u00b5, i.e., |{v : \u2203(v, f) \u2208 V^{(t)}_I}| \u2264 \u00b5 \u2200f \u2208 F (see lines 8-9). During each iteration step t we compute the average degree of the current graph G^{(t)}_I as avgdeg(G^{(t)}_I) = 2|E^{(t)}_I| / |V^{(t)}_I|. Finally, we select as the densest subgraph of the initial semantic interpretation graph G_I the graph G*_I that maximizes the average degree (see lines 14-15).",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Candidate Disambiguation",

"sec_num": "7"

},
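{

"text": "Putting the pieces together, a condensed sketch of Algorithm 3 under the same representation: repeatedly strip the lowest-scoring interpretation of the most ambiguous fragment until every fragment has at most \u00b5 candidates, and return the intermediate graph with the highest average degree. Here score_fn(v, f, E) is assumed to implement Equation 2; all names are illustrative.\n\ndef densest_subgraph(fragments, V, E, mu, score_fn):\n    def avgdeg(Vt, Et):\n        return 2 * len(Et) / len(Vt) if Vt else 0.0\n\n    best = (set(V), set(E))\n    while True:\n        by_frag = {f: [v for (v, g) in V if g == f] for f in fragments}\n        f_max = max(by_frag, key=lambda f: len(by_frag[f]))\n        if len(by_frag[f_max]) <= mu:\n            break  # ambiguity level reached (cf. lines 8-9)\n        v_min = min(by_frag[f_max], key=lambda v: score_fn(v, f_max, E))\n        V = V - {(v_min, f_max)}\n        E = {(a, b) for (a, b) in E if a in V and b in V}\n        if avgdeg(V, E) > avgdeg(*best):\n            best = (set(V), set(E))  # cf. lines 14-15\n    return best",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Candidate Disambiguation",

"sec_num": "7"

},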
|
{ |
|
"text": "Datasets. We carried out our experiments on six datasets, four for WSD and two for EL:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "\u2022 The SemEval-2013 task 12 dataset for multilingual WSD , which consists of 13 documents in different domains, available in 5 languages. For each language, all noun occurrences were annotated using BabelNet, thereby providing Wikipedia and WordNet annotations wherever applicable. The number of mentions to be disambiguated roughly ranges from 1K to 2K per language in the different setups.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "\u2022 The SemEval-2007 task 7 dataset for coarsegrained English all-words WSD (Navigli et al., 2007) . We take into account only nominal mentions obtaining a dataset containing 1107 nouns to be disambiguated using WordNet.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 96, |
|
"text": "(Navigli et al., 2007)", |
|
"ref_id": "BIBREF52" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "\u2022 The SemEval-2007 task 17 dataset for finegrained English all-words WSD (Pradhan et al., 2007) . We considered only nominal mentions resulting in 158 nouns annotated with WordNet synsets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 95, |
|
"text": "(Pradhan et al., 2007)", |
|
"ref_id": "BIBREF61" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "\u2022 The Senseval-3 dataset for English all-words WSD (Snyder and Palmer, 2004) , which contains 899 nouns to be disambiguated using WordNet.", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 76, |
|
"text": "(Snyder and Palmer, 2004)", |
|
"ref_id": "BIBREF68" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "\u2022 KORE50 (Hoffart et al., 2012) , which consists of 50 short English sentences (mean length of 14 words) with a total number of 144 mentions manually annotated using YAGO2, for which a Wikipedia mapping is available. This dataset was built with the idea of testing against a high level of ambiguity for the EL task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 31, |
|
"text": "(Hoffart et al., 2012)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Parameters. We fixed the parameters of RWR (Section 5) to the values \u03b1 = .85, \u03b7 = 100 and n = 1M which maximize F1 on a manually created tuning set made up of 10 gold-standard semantic signatures. We tuned our two disambiguation parameters \u00b5 = 10 and \u03b8 = 0.8 by optimizing F 1 on the trial dataset of the SemEval-2013 task on multilingual WSD . We used the same parameters on all the other WSD datasets. As for EL, we used the training part of AIDA-CoNLL (Hoffart et al., 2011) to set \u00b5 = 5 and \u03b8 = 0.0.", |
|
"cite_spans": [ |
|
{ |
|
"start": 455, |
|
"end": 477, |
|
"text": "(Hoffart et al., 2011)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Multilingual WSD. We evaluated our system on the SemEval-2013 task 12 by comparing it with the participating systems:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "\u2022 UMCC-DLSI (Guti\u00e9rrez et al., 2013) a stateof-the-art Personalized PageRank-based approach that exploits the integration of different sources of knowledge, such as WordNet Domains/Affect (Strapparava and Valitutti, 2004) , SUMO (Zouaq et al., 2009) and the eXtended WordNet (Mihalcea and Moldovan, 2001 );", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 36, |
|
"text": "(Guti\u00e9rrez et al., 2013)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 221, |
|
"text": "(Strapparava and Valitutti, 2004)", |
|
"ref_id": "BIBREF69" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 249, |
|
"text": "(Zouaq et al., 2009)", |
|
"ref_id": "BIBREF77" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 303, |
|
"text": "(Mihalcea and Moldovan, 2001", |
|
"ref_id": "BIBREF43" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "\u2022 DAEBAK! (Manion and Sainudiin, 2013) which performs WSD on the basis of peripheral diversity within subgraphs of BabelNet;", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 38, |
|
"text": "(Manion and Sainudiin, 2013)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "\u2022 GETALP (Schwab et al., 2013) which uses an Ant Colony Optimization technique together with the classical measure of Lesk (1986) . We also compared with UKB w2w (Agirre and Soroa, 2009) , a state-of-the-art approach for knowledge-based WSD, based on Personalized PageRank (Haveliwala, 2002) . We used the same mapping from words to senses that we used in our approach, default parameters 7 and BabelNet as the input graph. Moreover, we compared our system with IMS (Zhong and Ng, 2010) , a state-of-theart supervised English WSD system which uses an SVM trained on sense-annotated corpora, such as SemCor (Miller et al., 1993) and DSO (Ng and Lee, 1996) , among others. We used the IMS model out-of-the-box with Most Frequent Sense (MFS) as backoff routine since the model obtained using the task trial data performed worse.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 30, |
|
"text": "(Schwab et al., 2013)", |
|
"ref_id": "BIBREF65" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 129, |
|
"text": "Lesk (1986)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 186, |
|
"text": "(Agirre and Soroa, 2009)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 273, |
|
"end": 291, |
|
"text": "(Haveliwala, 2002)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 486, |
|
"text": "(Zhong and Ng, 2010)", |
|
"ref_id": "BIBREF76" |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 627, |
|
"text": "(Miller et al., 1993)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 632, |
|
"end": 654, |
|
"text": "DSO (Ng and Lee, 1996)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "We followed the original task formulation and evaluated the synsets in three different settings, i.e., when using BabelNet senses, Wikipedia senses and WordNet senses, thanks to BabelNet being a superset of the other two inventories. We ran our system on a document-by-document basis, i.e., disambiguating each document at once, so as to test its effectiveness on long coherent texts. Performance was calculated in terms of F1 score. We also compared the systems with the MFS baseline computed for the three inventories .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "Coarse-grained WSD. For the SemEval-2007 task 7 we compared our system with the two topranked approaches, i.e., NUS-PT (Chan et al., 2007) and UoR-SSI (Navigli, 2008) , which respectively exploited parallel texts and enriched semantic paths in a semantic network, the previously described UKB w2w system, 8 a knowledge-based WSD approach (Ponzetto and Navigli, 2010) which exploits an automatic extension of WordNet, and, as baseline, the MFS.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 138, |
|
"text": "NUS-PT (Chan et al., 2007)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 151, |
|
"end": 166, |
|
"text": "(Navigli, 2008)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "Fine-grained WSD. For the remaining finegrained WSD datasets, i.e., Senseval-3 and SemEval-2007 task 17, we compared our approach with the previously described state-of-the-art systems UKB and IMS, and, as baseline, the MFS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "KORE50 and AIDA-CoNLL. For the KORE50 and AIDA-CoNLL datasets we compared our system with six approaches, including state-of-the-art ones (Hoffart et al., 2012; Cornolti et al., 2013) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 160, |
|
"text": "(Hoffart et al., 2012;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 183, |
|
"text": "Cornolti et al., 2013)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "\u2022 MW, i.e., the Normalized Google Distance as defined by Milne and Witten (2008) ;", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 80, |
|
"text": "Milne and Witten (2008)", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "\u2022 KPCS (Hoffart et al., 2012) , which calculates a Mutual Information weighted vector of keyphrases for each candidate and then uses the cosine similarity to obtain candidates' scores;", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 29, |
|
"text": "(Hoffart et al., 2012)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "\u2022 KORE and its variants KORE LSH\u2212G and KORE LSH\u2212F (Hoffart et al., 2012) , based on similarity measures that exploit the overlap between phrases associated with the considered entities (KORE) and a hashing technique to reduce the space needed by the keyphrases associated with the entities (LSH-G, LSH-F);", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 72, |
|
"text": "(Hoffart et al., 2012)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "\u2022 Tagme 2.0 9 (Ferragina and Scaiella, 2012) which uses the relatedness measure defined Table 1 : F1 scores (percentages) of the participating systems of SemEval-2013 task 12 together with MFS, UKB w2w, IMS, our system and its ablated versions on the Senseval-3, SemEval-2007 task 17 and SemEval-2013 datasets. The first system which has a statistically significant difference from the top system is marked with (\u03c7 2 , p < 0.05).", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 44, |
|
"text": "(Ferragina and Scaiella, 2012)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 95, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "by Milne and Witten (2008) weighted with the commonness of a sense together with the keyphraseness measure defined by Mihalcea and Csomai (2007) to exploit the context around the target word;", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 26, |
|
"text": "Milne and Witten (2008)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 144, |
|
"text": "Mihalcea and Csomai (2007)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "\u2022 Illinois Wikifier 10 (Cheng and Roth, 2013) which combines local features, such as commonness and TF-IDF between mentions and Wikipedia pages, with global coherence features based on Wikipedia links and relational inference;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "\u2022 DBpedia Spotlight 11 (Mendes et al., 2011) which uses LingPipe's string matching algorithm implementation together with a weighted cosine similarity measure to recognize and disambiguate mentions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 44, |
|
"text": "(Mendes et al., 2011)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
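
{

"text": "As a point of reference for the MW and Tagme entries above, the following is a minimal sketch of the Wikipedia link-based relatedness of Milne and Witten (2008), a variant of the Normalized Google Distance computed over the sets of articles that link to each entity. The function and variable names are ours and are not taken from any of the compared systems:\n\nimport math\n\ndef mw_distance(in_links_a, in_links_b, num_articles):\n    # in_links_a, in_links_b: sets of Wikipedia articles linking to entities a and b\n    common = in_links_a & in_links_b\n    if not common:\n        return 1.0  # maximally distant: no article links to both entities\n    larger = max(len(in_links_a), len(in_links_b))\n    smaller = min(len(in_links_a), len(in_links_b))\n    return (math.log(larger) - math.log(len(common))) / (\n        math.log(num_articles) - math.log(smaller))\n\nA relatedness score can then be obtained as max(0, 1 - mw_distance(a, b, w)).",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Systems",

"sec_num": "8.1"

},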
|
{ |
|
"text": "We also compared with UKB w2w, introduced above. Note that we could not use supervised systems, as the training data of AIDA-CoNLL covers less than half of the mentions used in the testing part and less than 10% of the entities considered in KORE50. To enable a fair comparison, we ran our system by restricting the BabelNet sense inventory of the target mentions to the English Wikipedia. As is customary in the literature, we calculated the systems' accuracy for both Entity Linking datasets. 10 We used the out-of-the-box Java API available from http://cogcomp.cs.illinois.edu/page/download view/Wikifier 11 We used the 2011 version of DBpedia Spotlight as it obtains better scores on the considered datasets in comparison to the new version (Daiber et al., 2013) . We used the out-of-thebox RESTful API available at http://spotlight.dbpedia.org", |
|
"cite_spans": [ |
|
{ |
|
"start": 495, |
|
"end": 497, |
|
"text": "10", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 610, |
|
"text": "11", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 745, |
|
"end": 766, |
|
"text": "(Daiber et al., 2013)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Systems", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "Multilingual WSD. In Table 1 we show the F1 performance on the SemEval-2013 task 12 for the three setups: WordNet, Wikipedia and BabelNet. Using BabelNet we surpass all systems on English and German and obtain performance comparable with the best systems on two other languages (UKB on Italian and UMCC-DLSI on Spanish). Using the WordNet sense inventory, our results are on a par with the best system, i.e., IMS. On Wikipedia our results range between 71.6% (French) and 87.4% F1 (English), i.e., more than 10 points higher than the current state of the art (UMCC-DLSI) in all 5 languages. As for the MFS baseline, which is known to be very competitive in WSD (Navigli, 2009) , we beat it in all setups except for German on Wikipedia. Interestingly, we surpass the WordNet MFS by 2.9 points, a significant result for a knowledge-based system (see also (Pilehvar and Navigli, 2014)).", |
|
"cite_spans": [ |
|
{ |
|
"start": 661, |
|
"end": 676, |
|
"text": "(Navigli, 2009)", |
|
"ref_id": "BIBREF55" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 28, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
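
{

"text": "For reference, the F1 figures reported in this section combine precision over the attempted instances with recall over all instances. The following helper is our own minimal sketch, not part of the official scoring scripts:\n\ndef f1_score(correct, attempted, total):\n    # precision: fraction of attempted instances answered correctly\n    # recall: fraction of all instances answered correctly\n    if attempted == 0 or total == 0:\n        return 0.0\n    precision = correct / attempted\n    recall = correct / total\n    if precision + recall == 0:\n        return 0.0\n    return 2 * precision * recall / (precision + recall)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "9"

},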
|
{ |
|
"text": "Coarse-and fine-grained WSD. In Table 2 , we show the results of the systems on the SemEval-2007 coarse-grained WSD dataset. As can be seen, we obtain the second best result after Ponzetto and Navigli (2010) . In Table 1 (first two columns), we show the results of IMS and UKB on the Senseval-3 and SemEval-2007 task 17 datasets. We rank second on both datasets after IMS. However, the differences are not statistically significant. Moreover, Agirre et al. (2014, Table 5 ) note that using WordNet 3.0, instead of 1.7 or 2.1, to annotate these datasets can cause a more than one percent drop in performance. Table 2 : F1 score (percentages) on the SemEval-2007 task 7. The first system which has a statistically significant difference from the top system is marked with (\u03c7 2 , p < 0.05).", |
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 207, |
|
"text": "Ponzetto and Navigli (2010)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 471, |
|
"text": "Agirre et al. (2014, Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 39, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 220, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 615, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
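
{

"text": "The significance markers in the tables come from a chi-squared test at p < 0.05. The tables do not spell out the underlying contingency table; a common setup, which we assume here, compares the correct/incorrect counts of two systems on the same dataset:\n\nfrom scipy.stats import chi2_contingency\n\ndef significantly_different(correct_a, wrong_a, correct_b, wrong_b, alpha=0.05):\n    # 2x2 contingency table of correct vs. incorrect answers for two systems\n    table = [[correct_a, wrong_a], [correct_b, wrong_b]]\n    chi2, p_value, dof, expected = chi2_contingency(table)\n    return p_value < alpha",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "9"

},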
|
{ |
|
"text": "Entity Linking. In Table 3 we show the results on the two Entity Linking datasets, i.e., KORE50 and AIDA-CoNLL. Our system outperforms all other approaches, with KORE-LSH-G getting closest, and Tagme and Wikifier lagging behind on the KORE50 dataset. For the AIDA-CoNLL dataset we obtain the third best performance after MW and KPCS, however the difference is not statistically significant. We note the low performance of DBpedia Spotlight which, even if it achieves almost 100% precision on the identified mentions on both datasets, suffers from low recall due to its candidate identification step, confirming previous evaluations (Derczynski et al., 2013; Hakimov et al., 2012; Ludwig and Sack, 2011) . This problem becomes even more accentuated in the latest version of this system (Daiber et al., 2013) . Finally, UKB using BabelNet obtains low performance on EL, i.e., 19.4-10.5 points below the state of the art. This result is discussed below.", |
|
"cite_spans": [ |
|
{ |
|
"start": 632, |
|
"end": 657, |
|
"text": "(Derczynski et al., 2013;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 658, |
|
"end": 679, |
|
"text": "Hakimov et al., 2012;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 680, |
|
"end": 702, |
|
"text": "Ludwig and Sack, 2011)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 785, |
|
"end": 806, |
|
"text": "(Daiber et al., 2013)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 26, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "Discussion. The results obtained by UKB show that the high performance of our unified approach to EL and WSD is not just a mere artifact of the use of a rich multilingual semantic network, that is, Ba-belNet. In other words, it is not true that any graphbased algorithm could be applied to perform both EL and WSD at the same time equally well. This also shows that BabelNet by itself is not sufficient for achieving high performances for both tasks and that, instead, an appropriate processing of the structural and lexical information of the semantic network is needed. A manual analysis revealed that the main cause of error for UKB in the EL setup stems Table 3 : Accuracy (percentages) of state-of-the-art EL systems and our system on KORE50 and AIDA-CoNLL.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 658, |
|
"end": 665, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "The first system with a statistically significant difference from the top system is marked with (\u03c7 2 , p < 0.05).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "from its inability to enforce high coherence, e.g., by jointly disambiguating all the words, which is instead needed when considering the high level of ambiguity that we have in our semantic interpretation graph (Cucerzan, 2007) . For instance, for sentence (1) in the introduction, UKB disambiguates Thomas as a cricket player and Mario as the popular video game rather than the two well-known soccer players, and Munich as the German city, rather than the soccer team in which they play. Our approach, instead, by enforcing highly coherent semantic interpretations, correctly identifies all the soccer-related entities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 212, |
|
"end": 228, |
|
"text": "(Cucerzan, 2007)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "In order to determine the need of our loose candidate identification heuristic (see Section 6), we compared the percentage of times a candidate set contains the correct entity against that obtained by an exact string matching between the mention and the sense inventory. On KORE50, our heuristic retrieves the correct entity 98.6% of the time vs. 42.4% when exact matching is used. This demonstrates the inadequacy of exact matching for EL, and the need for a comprehensive sense inventory, as is done in our approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
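
{

"text": "To illustrate the difference measured above, the following sketch contrasts exact string matching with a loose, substring-based candidate identification; the inventory layout and the containment test are our own simplification of the heuristic of Section 6:\n\ndef exact_candidates(mention, inventory):\n    # inventory: dict mapping lowercased lexicalizations to sets of candidate senses\n    return inventory.get(mention.lower(), set())\n\ndef loose_candidates(mention, inventory):\n    m = mention.lower()\n    candidates = set()\n    for lexicalization, senses in inventory.items():\n        # loose match: the mention occurs inside a longer lexicalization,\n        # e.g. \"mario\" also retrieves the senses of \"mario gomez\"\n        if m in lexicalization:\n            candidates |= senses\n    return candidates",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "9"

},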
|
{ |
|
"text": "We also performed different ablation tests by experimenting with the following variants of our system (reported at the bottom of Tables 1, 2 and 3):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 Babelfy using uniform distribution during the RWR to obtain the concepts' semantic signatures; this test assesses the impact of our weighting and edge creation strategy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 Babelfy without performing the densest subgraph heuristic, i.e., when line 12 in Algorithm 2 is G I = G I , so as to verify the impact of identifying the most coherent interpretations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 Babelfy applied to the BabelNet subgraph induced by the entire set of named entity vertices, for the EL task, and that induced by word senses only, for the WSD task; this test aims to stress the impact of our unified approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 Babelfy applied on sentences instead of on whole documents. The component which has a smaller impact on the performance is our triangle-based weighting scheme. The main exception is on the smallest dataset, i.e., SemEval-2007 task 17, for which this version attains an improvement of 2.5 percentage points.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
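
{

"text": "Regarding the first ablation above: the semantic signatures are built with Random Walk with Restart, i.e., Personalized PageRank restarted on a single vertex. The following is a minimal dense-matrix sketch under our own choice of restart probability and iteration count; the full system further applies its triangle-based edge weights, which the ablation replaces with a uniform transition distribution:\n\nimport numpy as np\n\ndef rwr_signature(transition, start, restart=0.15, iterations=50):\n    # transition: column-stochastic transition matrix of the semantic network\n    n = transition.shape[0]\n    e = np.zeros(n)\n    e[start] = 1.0  # all restart mass on a single vertex\n    v = e.copy()\n    for _ in range(iterations):\n        v = (1 - restart) * transition @ v + restart * e\n    return v  # the highest-scoring vertices form the signature of the start vertex",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "9"

},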
|
{ |
|
"text": "Babelfy without the densest subgraph algorithm is the version which attains the lowest performances on the EL task, with a 9% performance drop on the KORE50 dataset, showing the need for a specially designed approach to cope with the high level of ambiguity that is encountered on this task. On the other hand, in the WSD datasets this version attains almost the same results as the full version, due to the lower number of candidate word senses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "Babelfy applied on sentences instead of on whole documents shows a lower performance, confirming the significance of higher semantic coherence on whole documents (notwithstanding the two exceptions on the SemEval-2007 task 17 and on the SemEval-2013 German Wikipedia datasets).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "Finally, the version in which we restrict our system to named entities only (for EL) and concepts only (for WSD) consistently obtains lower results (notwithstanding the three exceptions on the Spanish SemEval-2013 task 12 using BabelNet and Wikipedia, and on the SemEval 2007 coarse-grained task). This highlights the benefit of our joint use of lexicographic and encyclopedic structured knowledge, on each of the two tasks. The 3.4% performance drop attained on KORE50 is of particular interest, since this dataset aims at testing performance on highly ambiguous mentions within short sentences. This indicates that the semantic analysis of small contexts can be improved by leveraging the coherence between concepts and named entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "In this paper we presented Babelfy, a novel, integrated approach to Entity Linking and Word Sense Disambiguation, available at http://babelfy.org. Our joint solution is based on three key steps: i) the automatic creation of semantic signatures, i.e., related concepts and named entities, for each node in the reference semantic network; ii) the unconstrained identification of candidate meanings for all possible textual fragments; iii) linking based on a high-coherence densest subgraph algorithm. We used BabelNet 1.1.1 as our multilingual semantic network.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "Our graph-based approach exploits the semantic network structure to its advantage: two key features of BabelNet, that is, its multilinguality and its integration of lexicographic and encyclopedic knowledge, make it possible to run our general, unified approach on the two tasks of Entity Linking and WSD in any of the languages covered by the semantic network. However, we also demonstrated that Babel-Net in itself does not lead to state-of-the-art accuracy on both tasks, even when used in conjunction with a high-performance graph-based algorithm like Personalized PageRank. This shows the need for our novel unified approach to EL and WSD.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "At the core of our approach lies the effective treatment of the high degree of ambiguity of partial textual mentions by means of a 2-approximation algorithm for the densest subgraph problem, which enables us to output a semantic interpretation of the input text with drastically reduced ambiguity, as was previously done with SSI (Navigli, 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 330, |
|
"end": 345, |
|
"text": "(Navigli, 2008)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "10" |
|
}, |
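
{

"text": "The 2-approximation in question is, in essence, the greedy peeling strategy of Charikar (2000): repeatedly remove a minimum-degree vertex and return the densest intermediate subgraph. The sketch below is the generic algorithm on an undirected graph stored as a dict of adjacency sets; Babelfy adapts the peeling criterion to its fragment/candidate structure rather than using raw degree:\n\ndef densest_subgraph(graph):\n    # graph: dict mapping each vertex to the set of its neighbours\n    g = {v: set(ns) for v, ns in graph.items()}\n    density = lambda h: sum(len(ns) for ns in h.values()) / (2.0 * len(h))\n    best, best_density = set(g), density(g)\n    while len(g) > 1:\n        v = min(g, key=lambda u: len(g[u]))  # peel a minimum-degree vertex\n        for u in g[v]:\n            g[u].discard(v)\n        del g[v]\n        if density(g) > best_density:\n            best, best_density = set(g), density(g)\n    return best",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Conclusion",

"sec_num": "10"

},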
|
{ |
|
"text": "Our experiments on six gold-standard datasets show the state-of-the-art performance of our approach, as well as its robustness across languages. Our evaluation also demonstrates that our approach fares well both on long texts, such as those of the WSD tasks, and short and highly-ambiguous sentences, such as the ones in KORE50. Finally, ablation tests and further analysis demonstrate that each component of our system is needed to contribute state-of-the-art performances on both EL and WSD.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "As future work, we plan to use Babelfy for information extraction, where semantics is taking the lead (Moro and Navigli, 2013) , and for the validation of semantic annotations (Vannella et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 126, |
|
"text": "(Moro and Navigli, 2013)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 199, |
|
"text": "(Vannella et al., 2014)", |
|
"ref_id": "BIBREF74" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "10" |
|
}, |
|
|
{ |
|
"text": "Mentions which are not contained in the reference knowledge base are not taken into account.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://babelnet.org 3 http://www.wikipedia.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "RWR can be used with an initial set of vertices, however in this paper we use a single initial vertex.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 AIDA-CoNLL 6(Hoffart et al., 2011), which consists of 1392 English articles, for a total of roughly 35K named entity mentions annotated with YAGO concepts separated in development, training and test sets. We exploited the POS tags already available in the SemEval and Senseval datasets, while we used the Stanford POS tagger(Toutanova et al., 2003) for the English sentences in the last two datasets.6 We used AIDA-CoNLL as it is the most recent and largest available dataset for EL(Hachey et al., 2013). The TAC KBP datasets are available only to participants.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "./ukb wsd -D dict.txt -K kb.bin --ppr w2w ctx.txt", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We report the results as given byAgirre et al. (2014).9 We used the out-of-the-box RESTful API available at http://tagme.di.unipi.it", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The authors gratefully acknowledge the support of the ERC Starting Grant MultiJEDI No. 259234. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Personalizing PageRank for Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Soroa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of EACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre and Aitor Soroa. 2009. Personalizing PageRank for Word Sense Disambiguation. In Proc. of EACL, pages 33-41.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Two graph-based algorithms for state-of-the-art WSD", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mart\u00ednez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "585--593", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, David Mart\u00ednez, Oier L\u00f3pez de Lacalle, and Aitor Soroa. 2006. Two graph-based algorithms for state-of-the-art WSD. In Proc. of EMNLP, pages 585-593.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Graph-based Word Sense Disambiguation of biomedical documents", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Soroa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Stevenson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Bioinformatics", |
|
"volume": "26", |
|
"issue": "22", |
|
"pages": "2889--2896", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Aitor Soroa, and Mark Stevenson. 2010. Graph-based Word Sense Disambiguation of biomedi- cal documents. Bioinformatics, 26(22):2889-2896.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Random Walks for Knowledge-Based Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oier", |
|
"middle": [], |
|
"last": "Lopez De Lacalle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Soroa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computational Linguistics", |
|
"volume": "40", |
|
"issue": "1", |
|
"pages": "57--84", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Oier Lopez de Lacalle, and Aitor Soroa. 2014. Random Walks for Knowledge-Based Word Sense Disambiguation. Computational Linguistics, 40(1):57-84.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "DBpedia: A Nucleus for a Web of Open Data", |
|
"authors": [ |
|
{ |
|
"first": "S\u00f6ren", |
|
"middle": [], |
|
"last": "Auer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Bizer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Kobilarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Cyganiak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Ives", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of ISWC/ASWC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "722--735", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S\u00f6ren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. 2007. DBpedia: A Nucleus for a Web of Open Data. In Proc. of ISWC/ASWC, pages 722-735.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Word Sense Disambiguation with multilingual features", |
|
"authors": [ |
|
{ |
|
"first": "Carmen", |
|
"middle": [], |
|
"last": "Banea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of IWCS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carmen Banea and Rada Mihalcea. 2011. Word Sense Disambiguation with multilingual features. In Proc. of IWCS, pages 25-34.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "LINDA: distributed web-ofdata-scale entity matching", |
|
"authors": [ |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "B\u00f6hm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerard", |
|
"middle": [], |
|
"last": "De Melo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2104--2108", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christoph B\u00f6hm, Gerard de Melo, Felix Naumann, and Gerhard Weikum. 2012. LINDA: distributed web-of- data-scale entity matching. In Proc. of CIKM, pages 2104-2108.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Bayesian Word Sense Induction", |
|
"authors": [ |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Brody", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of EACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "103--111", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel Brody and Mirella Lapata. 2009. Bayesian Word Sense Induction. In Proc. of EACL, pages 103-111.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Using encyclopedic knowledge for named entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Razvan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marius", |
|
"middle": [], |
|
"last": "Bunescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Pasca", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of EACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Razvan C. Bunescu and Marius Pasca. 2006. Using en- cyclopedic knowledge for named entity disambigua- tion. In Proc. of EACL, pages 9-16.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "NUS-PT: Exploiting Parallel Texts for Word Sense Disambiguation in the English All-Words Tasks", |
|
"authors": [ |
|
{ |
|
"first": "Yee", |
|
"middle": [], |
|
"last": "Seng Chan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of SemEval-2007", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "253--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yee Seng Chan, Hwee Tou Ng, and Zhi Zhong. 2007. NUS-PT: Exploiting Parallel Texts for Word Sense Disambiguation in the English All-Words Tasks. In Proc. of SemEval-2007, pages 253-256.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Greedy approximation algorithms for finding dense components in a graph", |
|
"authors": [ |
|
{ |
|
"first": "Moses", |
|
"middle": [], |
|
"last": "Charikar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proc. of APPROX", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "84--95", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moses Charikar. 2000. Greedy approximation algo- rithms for finding dense components in a graph. In Proc. of APPROX, pages 84-95.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Relational Inference for Wikification", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1787--1796", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Cheng and Dan Roth. 2013. Relational Inference for Wikification. In Proc. of EMNLP, pages 1787- 1796.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A framework for benchmarking entityannotation systems", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Cornolti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Ferragina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimiliano", |
|
"middle": [], |
|
"last": "Ciaramita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of WWW", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "249--260", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Cornolti, Paolo Ferragina, and Massimiliano Cia- ramita. 2013. A framework for benchmarking entity- annotation systems. In Proc. of WWW, pages 249- 260.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Large-Scale Named Entity Disambiguation Based on Wikipedia Data", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Silviu Cucerzan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "708--716", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Silviu Cucerzan. 2007. Large-Scale Named Entity Dis- ambiguation Based on Wikipedia Data. In Proc. of EMNLP-CoNLL, pages 708-716.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Improving efficiency and accuracy in multilingual entity extraction", |
|
"authors": [ |
|
{ |
|
"first": "Joachim", |
|
"middle": [], |
|
"last": "Daiber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Jakob", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Hokamp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pablo", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Mendes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of I-Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "121--124", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joachim Daiber, Max Jakob, Chris Hokamp, and Pablo N. Mendes. 2013. Improving efficiency and accuracy in multilingual entity extraction. In Proc. of I-Semantics, pages 121-124.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Microblog-genre noise and impact on semantic annotation accuracy", |
|
"authors": [ |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Maynard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niraj", |
|
"middle": [], |
|
"last": "Aswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalina", |
|
"middle": [], |
|
"last": "Bontcheva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of Hypertext", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leon Derczynski, Diana Maynard, Niraj Aswani, and Kalina Bontcheva. 2013. Microblog-genre noise and impact on semantic annotation accuracy. In Proc. of Hypertext, pages 21-30.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Clustering and Diversifying Web Search Results with Graph-Based Word Sense Induction", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [ |
|
"Di" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Linguistics", |
|
"volume": "39", |
|
"issue": "3", |
|
"pages": "709--754", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Di Marco and Roberto Navigli. 2013. Cluster- ing and Diversifying Web Search Results with Graph- Based Word Sense Induction. Computational Linguis- tics, 39(3):709-754.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Link discovery: A comprehensive analysis", |
|
"authors": [ |
|
{ |
|
"first": "Nicolai", |
|
"middle": [], |
|
"last": "Erbs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Torsten", |
|
"middle": [], |
|
"last": "Zesch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of ICSC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "83--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicolai Erbs, Torsten Zesch, and Iryna Gurevych. 2011. Link discovery: A comprehensive analysis. In Proc. of ICSC, pages 83-86.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Machine Reading", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michele", |
|
"middle": [], |
|
"last": "Banko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Cafarella", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1517--1519", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Etzioni, Michele Banko, and Michael J Cafarella. 2006. Machine Reading. In Proc. of AAAI, pages 1517-1519.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The dense k-subgraph problem", |
|
"authors": [ |
|
{ |
|
"first": "Uriel", |
|
"middle": [], |
|
"last": "Feige", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guy", |
|
"middle": [], |
|
"last": "Kortsarz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Peleg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Algorithmica", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Uriel Feige, Guy Kortsarz, and David Peleg. 1999. The dense k-subgraph problem. Algorithmica, 29:2001.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "WordNet: An Electronic Lexical Database", |
|
"authors": [ |
|
{ |
|
"first": "Christiane", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christiane Fellbaum. 1998. WordNet: An Electronic Lexical Database. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "TAGME: On-the-fly Annotation of Short Text Fragments (by Wikipedia Entities)", |
|
"authors": [ |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Ferragina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ugo", |
|
"middle": [], |
|
"last": "Scaiella", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1625--1628", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paolo Ferragina and Ugo Scaiella. 2010. TAGME: On-the-fly Annotation of Short Text Fragments (by Wikipedia Entities). In Proc. of CIKM, pages 1625- 1628.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Fast and Accurate Annotation of Short Texts with Wikipedia Pages", |
|
"authors": [ |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Ferragina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ugo", |
|
"middle": [], |
|
"last": "Scaiella", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "IEEE Software", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "70--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paolo Ferragina and Ugo Scaiella. 2012. Fast and Accu- rate Annotation of Short Texts with Wikipedia Pages. IEEE Software, 29(1):70-75.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Combining Orthogonal Monolingual and Multilingual Sources of Evidence for All Words WSD", |
|
"authors": [ |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1542--1551", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weiwei Guo and Mona T. Diab. 2010. Combining Orthogonal Monolingual and Multilingual Sources of Evidence for All Words WSD. In Proc. of ACL, pages 1542-1551.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "UMCC DLSI: Reinforcing a Ranking Algorithm with Sense Frequencies and Multidimensional Semantic Resources to solve Multilingual Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Yoan", |
|
"middle": [], |
|
"last": "Guti\u00e9rrez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yenier", |
|
"middle": [], |
|
"last": "Casta\u00f1eda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Gonz\u00e1lez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rainel", |
|
"middle": [], |
|
"last": "Estrada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dennys", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Piug", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jose", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Abreu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "P\u00e9rez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Fern\u00e1ndez Orqu\u00edn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9s", |
|
"middle": [], |
|
"last": "Montoyo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of SemEval-2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "241--249", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoan Guti\u00e9rrez, Yenier Casta\u00f1eda, Andy Gonz\u00e1lez, Rainel Estrada, Dennys D. Piug, Jose I. Abreu, Roger P\u00e9rez, Antonio Fern\u00e1ndez Orqu\u00edn, Andr\u00e9s Montoyo, Rafael Mu\u00f1oz, and Franc Camara. 2013. UMCC DLSI: Reinforcing a Ranking Algorithm with Sense Frequencies and Multidimensional Semantic Resources to solve Multilingual Word Sense Disam- biguation. In Proc. of SemEval-2013, pages 241-249.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Evaluating Entity Linking with Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hachey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Nothman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Artificial Intelligence", |
|
"volume": "194", |
|
"issue": "", |
|
"pages": "130--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Hachey, Will Radford, Joel Nothman, Matthew Hon- nibal, and James R. Curran. 2013. Evaluating En- tity Linking with Wikipedia. Artificial Intelligence, 194:130-150.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Named entity recognition and disambiguation using linked data and graph-based centrality scoring", |
|
"authors": [ |
|
{ |
|
"first": "Sherzod", |
|
"middle": [], |
|
"last": "Hakimov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atilay", |
|
"middle": [], |
|
"last": "Salih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erdogan", |
|
"middle": [], |
|
"last": "Oto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dogdu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of SWIM", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "1--4", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sherzod Hakimov, Salih Atilay Oto, and Erdogan Dogdu. 2012. Named entity recognition and disambiguation using linked data and graph-based centrality scoring. In Proc. of SWIM, pages 4:1-4:7.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Topic-sensitive PageRank", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Taher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Haveliwala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. of WWW", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "517--526", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taher H. Haveliwala. 2002. Topic-sensitive PageRank. In Proc. of WWW, pages 517-526.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Robust disambiguation of named entities in text", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Hoffart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [ |
|
"Amir" |
|
], |
|
"last": "Yosef", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilaria", |
|
"middle": [], |
|
"last": "Bordino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hagen", |
|
"middle": [], |
|
"last": "F\u00fcrstenau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Spaniol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bilyana", |
|
"middle": [], |
|
"last": "Taneva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Thater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "782--792", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johannes Hoffart, Mohamed Amir Yosef, Ilaria Bordino, Hagen F\u00fcrstenau, Manfred Pinkal, Marc Spaniol, Bilyana Taneva, Stefan Thater, and Gerhard Weikum. 2011. Robust disambiguation of named entities in text. In Proc. of EMNLP, pages 782-792.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "KORE: keyphrase overlap relatedness for entity disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Hoffart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Seufert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "545--554", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johannes Hoffart, Stephan Seufert, Dat Ba Nguyen, Mar- tin Theobald, and Gerhard Weikum. 2012. KORE: keyphrase overlap relatedness for entity disambigua- tion. In Proc. of CIKM, pages 545-554.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "YAGO2: A spatially and temporally enhanced knowledge base from Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Hoffart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Fabian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Suchanek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Berberich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Artificial Intelligence", |
|
"volume": "194", |
|
"issue": "", |
|
"pages": "28--61", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johannes Hoffart, Fabian M Suchanek, Klaus Berberich, and Gerhard Weikum. 2013. YAGO2: A spatially and temporally enhanced knowledge base from Wikipedia. Artificial Intelligence, 194:28-61.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Collaboratively built semi-structured content and Artificial Intelligence: The story so far", |
|
"authors": [ |
|
{ |
|
"first": "Eduard", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Artificial Intelligence", |
|
"volume": "194", |
|
"issue": "", |
|
"pages": "2--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eduard H. Hovy, Roberto Navigli, and Simone P. Ponzetto. 2013. Collaboratively built semi-structured content and Artificial Intelligence: The story so far. Artificial Intelligence, 194:2-27.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "On finding dense subgraphs", |
|
"authors": [ |
|
{ |
|
"first": "Samir", |
|
"middle": [], |
|
"last": "Khuller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barna", |
|
"middle": [], |
|
"last": "Saha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of ICALP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "597--608", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samir Khuller and Barna Saha. 2009. On finding dense subgraphs. In Proc. of ICALP, pages 597-608.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Collective Annotation of Wikipedia Entities in Web Text", |
|
"authors": [ |
|
{ |
|
"first": "Sayali", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ganesh", |
|
"middle": [], |
|
"last": "Ramakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumen", |
|
"middle": [], |
|
"last": "Chakrabarti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of KDD", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "457--466", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sayali Kulkarni, Amit Singh, Ganesh Ramakrishnan, and Soumen Chakrabarti. 2009. Collective Annotation of Wikipedia Entities in Web Text. In Proc. of KDD, pages 457-466.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Semeval-2010 task 3: Cross-lingual Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Els", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00e9ronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of SemEval-2010", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "15--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Els Lefever and V\u00e9ronique Hoste. 2010. Semeval-2010 task 3: Cross-lingual Word Sense Disambiguation. In Proc. of SemEval-2010, pages 15-20.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "SemEval-2013 Task 10: Cross-lingual Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Els", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00e9ronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of SemEval-2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "158--166", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Els Lefever and V\u00e9ronique Hoste. 2013. SemEval-2013 Task 10: Cross-lingual Word Sense Disambiguation. In Proc. of SemEval-2013, pages 158-166.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Parasense or how to use parallel corpora for Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Els", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00e9ronique", |
|
"middle": [], |
|
"last": "Hoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martine", |
|
"middle": [ |
|
"De" |
|
], |
|
"last": "Cock", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of ACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "317--322", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Els Lefever, V\u00e9ronique Hoste, and Martine De Cock. 2011. Parasense or how to use parallel corpora for Word Sense Disambiguation. In Proc. of ACL-HLT, pages 317-322.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Automatic Sense Disambiguation Using Machine Readable Dictionaries: How to Tell a Pine Cone from an Ice Cream Cone", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Lesk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1986, |
|
"venue": "Proc. of the International Conference on Systems Documentation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "24--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael E. Lesk. 1986. Automatic Sense Disambigua- tion Using Machine Readable Dictionaries: How to Tell a Pine Cone from an Ice Cream Cone. In Proc. of the International Conference on Systems Documen- tation, pages 24-26.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Named entity recognition for user-generated tags", |
|
"authors": [ |
|
{ |
|
"first": "Nadine", |
|
"middle": [], |
|
"last": "Ludwig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harald", |
|
"middle": [], |
|
"last": "Sack", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of DEXA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--181", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nadine Ludwig and Harald Sack. 2011. Named entity recognition for user-generated tags. In Proc. of DEXA, pages 177-181.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "SemEval-2010 task 14: Word sense induction & disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Suresh", |
|
"middle": [], |
|
"last": "Manandhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Ioannis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitriy", |
|
"middle": [], |
|
"last": "Klapaftis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dligach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Sameer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of SemEval-2010", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suresh Manandhar, Ioannis P. Klapaftis, Dmitriy Dli- gach, and Sameer S. Pradhan. 2010. SemEval-2010 task 14: Word sense induction & disambiguation. In Proc. of SemEval-2010, pages 63-68.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "DAE-BAK!: Peripheral Diversity for Multilingual Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Steve", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Manion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raazesh", |
|
"middle": [], |
|
"last": "Sainudiin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of SemEval-2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "250--254", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steve L. Manion and Raazesh Sainudiin. 2013. DAE- BAK!: Peripheral Diversity for Multilingual Word Sense Disambiguation. In Proc. of SemEval-2013, pages 250-254.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "DBpedia spotlight: shedding light on the web of documents", |
|
"authors": [ |
|
{ |
|
"first": "Pablo", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Mendes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Jakob", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9s", |
|
"middle": [], |
|
"last": "Garc\u00eda-Silva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Bizer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of I-Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pablo N. Mendes, Max Jakob, Andr\u00e9s Garc\u00eda-Silva, and Christian Bizer. 2011. DBpedia spotlight: shed- ding light on the web of documents. In Proc. of I- Semantics, pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Wikify!: linking documents to encyclopedic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andras", |
|
"middle": [], |
|
"last": "Csomai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "233--242", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rada Mihalcea and Andras Csomai. 2007. Wikify!: link- ing documents to encyclopedic knowledge. In Proc. of CIKM, pages 233-242.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Extended WordNet: Progress report", |
|
"authors": [ |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dan I Moldovan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of NAACL Workshop on WordNet and Other Lexical Resources", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "95--100", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rada Mihalcea and Dan I Moldovan. 2001. Extended WordNet: Progress report. In Proc. of NAACL Work- shop on WordNet and Other Lexical Resources, pages 95-100.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Unsupervised large-vocabulary word sense disambiguation with graph-based algorithms for sequence data labeling", |
|
"authors": [ |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of HLT/EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "411--418", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rada Mihalcea. 2005. Unsupervised large-vocabulary word sense disambiguation with graph-based algo- rithms for sequence data labeling. In Proc. of HLT/EMNLP, pages 411-418.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "A semantic concordance", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claudia", |
|
"middle": [], |
|
"last": "Leacock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Randee", |
|
"middle": [], |
|
"last": "Tengi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ross", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Bunker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Proc. of HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "303--308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George A. Miller, Claudia Leacock, Randee Tengi, and Ross T. Bunker. 1993. A semantic concordance. In Proc. of HLT, pages 303-308.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Using Distributional Similarity for Lexical Expansion in Knowledge-based Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Biemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Torsten", |
|
"middle": [], |
|
"last": "Zesch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1781--1796", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tristan Miller, Chris Biemann, Torsten Zesch, and Iryna Gurevych. 2012. Using Distributional Similarity for Lexical Expansion in Knowledge-based Word Sense Disambiguation. In Proc. of COLING, pages 1781- 1796.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Learning to link with Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Milne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Witten", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "509--518", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Milne and Ian H. Witten. 2008. Learning to link with Wikipedia. In Proc. of CIKM, pages 509-518.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Integrating Syntactic and Semantic Analysis into the Open Information Extraction Paradigm", |
|
"authors": [ |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Moro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2148--2154", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrea Moro and Roberto Navigli. 2013. Integrating Syntactic and Semantic Analysis into the Open Infor- mation Extraction Paradigm. In Proc. of IJCAI, pages 2148-2154.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "An Experimental Study of Graph Connectivity for Unsupervised Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "TPAMI", |
|
"volume": "32", |
|
"issue": "4", |
|
"pages": "678--692", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli and Mirella Lapata. 2010. An Experi- mental Study of Graph Connectivity for Unsupervised Word Sense Disambiguation. TPAMI, 32(4):678-692.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Ba-belNet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Artificial Intelligence", |
|
"volume": "193", |
|
"issue": "", |
|
"pages": "217--250", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli and Simone Paolo Ponzetto. 2012a. Ba- belNet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network. Artificial Intelligence, 193:217-250.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Joining forces pays off: Multilingual Joint Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1399--1410", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli and Simone Paolo Ponzetto. 2012b. Joining forces pays off: Multilingual Joint Word Sense Disambiguation. In Proc. of EMNLP, pages 1399- 1410.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "SemEval-2007 Task 07: Coarse-Grained English All-Words Task", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Litkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orin", |
|
"middle": [], |
|
"last": "Hargraves", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of SemEval-2007", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--35", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli, Kenneth C. Litkowski, and Orin Har- graves. 2007. SemEval-2007 Task 07: Coarse- Grained English All-Words Task. In Proc. of SemEval-2007, pages 30-35.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "SemEval-2013 Task 12: Multilingual Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Jurgens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniele", |
|
"middle": [], |
|
"last": "Vannella", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of SemEval-2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "222--231", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli, David Jurgens, and Daniele Vannella. 2013. SemEval-2013 Task 12: Multilingual Word Sense Disambiguation. In Proc. of SemEval-2013, pages 222-231.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "A structural approach to the automatic adjudication of word sense disagreements", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Natural Language Engineering", |
|
"volume": "14", |
|
"issue": "4", |
|
"pages": "293--310", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli. 2008. A structural approach to the automatic adjudication of word sense disagreements. Natural Language Engineering, 14(4):293-310.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Word Sense Disambiguation: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "ACM Computing Surveys", |
|
"volume": "41", |
|
"issue": "2", |
|
"pages": "1--69", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli. 2009. Word Sense Disambiguation: A survey. ACM Computing Surveys, 41(2):1-69.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "A Quick Tour of Word Sense Disambiguation, Induction and Related Approaches", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of SOFSEM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "115--129", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli. 2012. A Quick Tour of Word Sense Disambiguation, Induction and Related Approaches. In Proc. of SOFSEM, pages 115-129.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Integrating multiple knowledge sources to disambiguate word sense: An exemplar-based approach", |
|
"authors": [ |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hian Beng", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "40--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hwee Tou Ng and Hian Beng Lee. 1996. Integrat- ing multiple knowledge sources to disambiguate word sense: An exemplar-based approach. In Proc. of ACL, pages 40-47.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "A Large-scale Pseudoword-based Evaluation Framework for State-of-the-Art Word Sense Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad Taher", |
|
"middle": [], |
|
"last": "Pilehvar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Taher Pilehvar and Roberto Navigli. 2014. A Large-scale Pseudoword-based Evaluation Frame- work for State-of-the-Art Word Sense Disambigua- tion. Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Knowledge-rich Word Sense Disambiguation rivaling supervised system", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1522--1531", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Knowledge-rich Word Sense Disambiguation rivaling supervised system. In Proc. of ACL, pages 1522-1531.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "SemEval-2007 task 17: English lexical sample, SRL and all words", |
|
"authors": [ |
|
{ |
|
"first": "Sameer", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Loper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitriy", |
|
"middle": [], |
|
"last": "Dligach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of SemEval-2007", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "87--92", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameer S. Pradhan, Edward Loper, Dmitriy Dligach, and Martha Palmer. 2007. SemEval-2007 task 17: En- glish lexical sample, SRL and all words. In Proc. of SemEval-2007, pages 87-92. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "Entity Linking: Finding Extracted Entities in a Knowledge Base", |
|
"authors": [ |
|
{ |
|
"first": "Delip", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Mcnamee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Multi-source, Multilingual Information Extraction and Summarization, Theory and Applications of Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "93--115", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Delip Rao, Paul McNamee, and Mark Dredze. 2013. En- tity Linking: Finding Extracted Entities in a Knowl- edge Base. In Multi-source, Multilingual Information Extraction and Summarization, Theory and Applica- tions of Natural Language Processing, pages 93-115. Springer Berlin Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Local and Global Algorithms for Disambiguation to Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Lev-Arie", |
|
"middle": [], |
|
"last": "Ratinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1375--1384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev-Arie Ratinov, Dan Roth, Doug Downey, and Mike Anderson. 2011. Local and Global Algorithms for Disambiguation to Wikipedia. In Proc. of ACL, pages 1375-1384.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "Turing's dream and the knowledge challenge", |
|
"authors": [ |
|
{ |
|
"first": "Lenhart", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Schubert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of NCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1534--1538", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lenhart K. Schubert. 2006. Turing's dream and the knowledge challenge. In Proc. of NCAI, pages 1534- 1538.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "GETALP System: Propagation of a Lesk Measure through an Ant Colony Algorithm", |
|
"authors": [ |
|
{ |
|
"first": "Didier", |
|
"middle": [], |
|
"last": "Schwab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andon", |
|
"middle": [], |
|
"last": "Tchechmedjiev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00e9r\u00f4me", |
|
"middle": [], |
|
"last": "Goulian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Nasiruddin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gilles", |
|
"middle": [], |
|
"last": "S\u00e9rasset", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herv\u00e9", |
|
"middle": [], |
|
"last": "Blanchon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of SemEval-2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "232--240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Didier Schwab, Andon Tchechmedjiev, J\u00e9r\u00f4me Goulian, Mohammad Nasiruddin, Gilles S\u00e9rasset, and Herv\u00e9 Blanchon. 2013. GETALP System: Propagation of a Lesk Measure through an Ant Colony Algorithm. In Proc. of SemEval-2013, pages 232-240.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "Coarse to Fine Grained Sense Disambiguation in Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Razvan", |
|
"middle": [], |
|
"last": "Bunescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of *SEM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "22--31", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hui Shen, Razvan Bunescu, and Rada Mihalcea. 2013. Coarse to Fine Grained Sense Disambiguation in Wikipedia. In Proc. of *SEM, pages 22-31.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "Unsupervised Graph-based Word Sense Disambiguation Using Measures of Word Semantic Similarity", |
|
"authors": [ |
|
{ |
|
"first": "Ravi", |
|
"middle": [], |
|
"last": "Sinha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of ICSC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "363--369", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ravi Sinha and Rada Mihalcea. 2007. Unsupervised Graph-based Word Sense Disambiguation Using Mea- sures of Word Semantic Similarity. In Proc. of ICSC, pages 363-369.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "The English all-words task", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Snyder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of Senseval-3", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Snyder and Martha Palmer. 2004. The English all-words task. In Proc. of Senseval-3, pages 41-43.", |
|
"links": null |
|
}, |
|
"BIBREF69": { |
|
"ref_id": "b69", |
|
"title": "Word-Net Affect: an Affective Extension of WordNet", |
|
"authors": [ |
|
{ |
|
"first": "Carlo", |
|
"middle": [], |
|
"last": "Strapparava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Valitutti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1083--1086", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carlo Strapparava and Alessandro Valitutti. 2004. Word- Net Affect: an Affective Extension of WordNet. In Proc. of LREC, pages 1083-1086.", |
|
"links": null |
|
}, |
|
"BIBREF70": { |
|
"ref_id": "b70", |
|
"title": "Fast Random Walk with Restart and Its Applications", |
|
"authors": [ |
|
{ |
|
"first": "Hanghang", |
|
"middle": [], |
|
"last": "Tong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christos", |
|
"middle": [], |
|
"last": "Faloutsos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jia-Yu", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of ICDM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "613--622", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hanghang Tong, Christos Faloutsos, and Jia-Yu Pan. 2006. Fast Random Walk with Restart and Its Appli- cations. In Proc. of ICDM, pages 613-622.", |
|
"links": null |
|
}, |
|
"BIBREF71": { |
|
"ref_id": "b71", |
|
"title": "Feature-rich part-of-speech tagging with a cyclic dependency network", |
|
"authors": [ |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoram", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proc. of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "173--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristina Toutanova, Dan Klein, Christopher D. Manning, and Yoram Singer. 2003. Feature-rich part-of-speech tagging with a cyclic dependency network. In Proc. of NAACL-HLT, pages 173-180.", |
|
"links": null |
|
}, |
|
"BIBREF72": { |
|
"ref_id": "b72", |
|
"title": "Word Sense Disambiguation with Spreading Activation Networks Generated from Thesauri", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Tsatsaronis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michalis", |
|
"middle": [], |
|
"last": "Vazirgiannis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ion", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1725--1730", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Tsatsaronis, Michalis Vazirgiannis, and Ion An- droutsopoulos. 2007. Word Sense Disambiguation with Spreading Activation Networks Generated from Thesauri. In Proc. of IJCAI, pages 1725-1730.", |
|
"links": null |
|
}, |
|
"BIBREF73": { |
|
"ref_id": "b73", |
|
"title": "Latent Semantic Word Sense Induction and Disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Van De Cruys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marianna", |
|
"middle": [], |
|
"last": "Apidianaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1476--1485", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tim Van de Cruys and Marianna Apidianaki. 2011. La- tent Semantic Word Sense Induction and Disambigua- tion. In Proc. of ACL, pages 1476-1485.", |
|
"links": null |
|
}, |
|
"BIBREF74": { |
|
"ref_id": "b74", |
|
"title": "Validating and Extending Semantic Knowledge Bases using Video Games with a Purpose", |
|
"authors": [ |
|
{ |
|
"first": "Daniele", |
|
"middle": [], |
|
"last": "Vannella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Jurgens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniele", |
|
"middle": [], |
|
"last": "Scarfini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Domenico", |
|
"middle": [], |
|
"last": "Toscani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniele Vannella, David Jurgens, Daniele Scarfini, Domenico Toscani, and Roberto Navigli. 2014. Vali- dating and Extending Semantic Knowledge Bases us- ing Video Games with a Purpose. In Proc. of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF75": { |
|
"ref_id": "b75", |
|
"title": "Collective dynamics of 'small-world' networks", |
|
"authors": [ |
|
{ |
|
"first": "Duncan", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Watts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Strogatz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Nature", |
|
"volume": "393", |
|
"issue": "6684", |
|
"pages": "409--419", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Duncan J. Watts and Steven H. Strogatz. 1998. Col- lective dynamics of 'small-world' networks. Nature, 393(6684):409-10.", |
|
"links": null |
|
}, |
|
"BIBREF76": { |
|
"ref_id": "b76", |
|
"title": "It Makes Sense: A Wide-Coverage Word Sense Disambiguation System for Free Text", |
|
"authors": [ |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of ACL (Demo)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "78--83", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhi Zhong and Hwee Tou Ng. 2010. It Makes Sense: A Wide-Coverage Word Sense Disambiguation System for Free Text. In Proc. of ACL (Demo), pages 78-83.", |
|
"links": null |
|
}, |
|
"BIBREF77": { |
|
"ref_id": "b77", |
|
"title": "A SUMO-based Semantic Analysis for Knowledge Extraction", |
|
"authors": [ |
|
{ |
|
"first": "Amal", |
|
"middle": [], |
|
"last": "Zouaq", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Gagnon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benoit", |
|
"middle": [], |
|
"last": "Ozell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc of LTC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amal Zouaq, Michel Gagnon, and Benoit Ozell. 2009. A SUMO-based Semantic Analysis for Knowledge Ex- traction. In Proc of LTC.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "n \u2212 1 16: for each v in counts.keys() do 17: if counts[v ] < \u03b7 then 18: remove v from counts.keys() 19:" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "An excerpt of the semantic interpretation graph automatically built for the sentence Thomas and Mario are strikers playing in Munich (the edges connecting the correct meanings are in bold)." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "3: function DISAMB(F, semSign, \u00b5, cand) 4: V I := \u2205; E I := \u2205 5:" |
|
} |
|
} |
|
} |
|
} |