|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:09:43.168536Z" |
|
}, |
|
"title": "ENTYFI: A System for Fine-grained Entity Typing in Fictional Texts", |
|
"authors": [ |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Cuong", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Razniewski", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Fiction and fantasy are archetypes of long-tail domains that lack suitable NLP methodologies and tools. We present ENTYFI, a web-based system for fine-grained typing of entity mentions in fictional texts. It builds on 205 automatically induced high-quality type systems for popular fictional domains, and provides recommendations towards reference type systems for given input texts. Users can exploit the richness and diversity of these reference type systems for fine-grained supervised typing, in addition, they can choose among and combine four other typing modules: pre-trained real-world models, unsupervised dependency-based typing, knowledge base lookups, and constraint-based candidate consolidation. The demonstrator is available at https://d5demos.mpi-inf.mpg. de/entyfi. Mention Settings Default (Ref. universes + all modules) Default without type consolidation Only real-world typing Elladan & Elrohir men, hybrid peoples, elves of rivendell, real world,", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Fiction and fantasy are archetypes of long-tail domains that lack suitable NLP methodologies and tools. We present ENTYFI, a web-based system for fine-grained typing of entity mentions in fictional texts. It builds on 205 automatically induced high-quality type systems for popular fictional domains, and provides recommendations towards reference type systems for given input texts. Users can exploit the richness and diversity of these reference type systems for fine-grained supervised typing, in addition, they can choose among and combine four other typing modules: pre-trained real-world models, unsupervised dependency-based typing, knowledge base lookups, and constraint-based candidate consolidation. The demonstrator is available at https://d5demos.mpi-inf.mpg. de/entyfi. Mention Settings Default (Ref. universes + all modules) Default without type consolidation Only real-world typing Elladan & Elrohir men, hybrid peoples, elves of rivendell, real world,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Motivation and Problem. Entity types are a core building block of current knowledge bases (KBs) and valuable for many natural language processing tasks, such as coreference resolution, relation extraction and question answering (Lee et al., 2006; Carlson et al., 2010; Recasens et al., 2013) . Context-based entity typing, the task of assigning semantic types for mentions of entities in textual contexts (e.g., musician, politician, location or battle) therefore has become an important NLP task. While traditional methods often use coarse-grained classes, such as person, location, organization and misc, as targets, recent methods try to classify entities into finergrained types, from hundreds to thousands of them, yet all limited to variants of the real world, like from Wikipedia or news (Lee et al., 2006; Ling and Weld, 2012; Corro et al., 2015; Choi et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 246, |
|
"text": "(Lee et al., 2006;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 268, |
|
"text": "Carlson et al., 2010;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 291, |
|
"text": "Recasens et al., 2013)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 795, |
|
"end": 813, |
|
"text": "(Lee et al., 2006;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 814, |
|
"end": 834, |
|
"text": "Ling and Weld, 2012;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 835, |
|
"end": 854, |
|
"text": "Corro et al., 2015;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 855, |
|
"end": 873, |
|
"text": "Choi et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Entity type information plays an even more important role in literary texts from fictional domains.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Fiction and fantasy are core parts of human culture, spanning from traditional folks and myths into books, movies, TV series and games. People have created sophisticated fictional universes such as the Marvel Universe, DC Comics, Middle Earth or Harry Potter. These universes include entities, social structures, and events that are completely different from the real world. Appropriate entity typing for these universes is a prerequisite for several end-user applications. For example, a Game of Thrones fan may want to query for House Stark members who are Faceless Men or which character is both a Warg and a Greenseer. On the other hand, an analyst may want to compare social structures between different mythologies or formations of different civilizations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "State-of-the-art methods for entity typing mostly use supervised models trained on Wikipedia content, and only focus on news and similar real-world texts. Due to low coverage of Wikipedia on fictional domains, these methods are thus not sufficient for literary texts. For example, for the following sentence from Lord of the Rings: \"After Melkor's defeat in the First Age, Sauron became the second Dark Lord and strove to conquer Arda by creating the Rings\" state-of-the-art entity typing methods only return few coarse types for entities, such as person for SAURON and MELKOR or location for FIRST AGE and ARDA. Moreover, existing methods typically produce predictions for each individual mention, so that different mentions of the same entity may be assigned incompatible types, e.g., ARDA may be predicted as person and location in different contexts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Contribution. The prototype system presented in this demo paper, ENTYFI (fine-grained ENtity TYping on FIctional texts, see Chu et al. (2020) for full details) overcomes the outlined limitations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 141, |
|
"text": "Chu et al. (2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "ENTYFI supports long input texts from any kind of literature, as well as texts from standard domains (e.g., news). With the sample text above, ENTYFI is able to predict more specific and meaningful types for entity mentions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "MELKOR: Ainur, Villain", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "FIRST AGE: Eras, Time SAURON: Maiar, Villain DARK LORD: Ainur, Title RINGS: Jewelry, Magic Things ARDA: Location", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To address the lack of reference types, ENTYFI leverages the content of fan communities on Wikia.com, from which 205 reference type systems are induced. Given an input text, ENTYFI then retrieves the most relevant reference type systems and uses them as typing targets. By combining supervised typing method with unsupervised pattern extraction and knowledge base lookups, suitable type candidates are identified. To resolve inconsistencies among candidates, ENTYFI utilizes an integer linear programming (ILP) based consolidation stage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Outline. The following section describes the architecture of ENTYFI with the approach underlying its main components. The demonstration is illustrated afterwards through its graphical user interface. Our demonstration system is available at: https://d5demos.mpi-inf.mpg. de/entyfi. We also provide a screencast video demonstrating our system, at: https://youtu. be/g_ESaONagFQ. Figure 1 : Overview of the architecture of ENTYFI (Chu et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 428, |
|
"end": 446, |
|
"text": "(Chu et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 378, |
|
"end": 386, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Breaking Bad) and video games (e.g. League of Legends, Pokemon).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Each universe in Wikia is organized similarly to Wikipedia, such that they contain entities and categories that can be used to distill reference type systems. We adopt techniques from the TiFi system (Chu et al., 2019) to clean and structure Wikia categories. We remove noisy categories (e.g. metacategories) by using a dictionary-based method. To ensure connectedness of taxonomies, we integrate the category networks with WordNet (WN) by linking the categories to the most similar WN synsets. The similarity is computed between the context of the category (e.g., description, super/sub categories) and the gloss of the WN synset (Chu et al., 2019). Resulting type systems typically contain between 700 to 10,000 types per universe.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Given an input text, the goal of this step is to find the most relevant universes among the reference universes. Each reference universe is represented by its entities and entity type system. We compute the cosine similarity between the TF-IDF vectors of the input and each universe. The top-ranked reference universes and their type systems are then used for mention typing (section 2.4).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference Universe Ranking", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "To detect entity mentions in the input text, we rely on a BIOES tagging scheme. Inspired by He et al. (2017) from the field of semantic role labeling, we design a BiLSTM network with embeddings and POS tags as input, highway connections between layers to avoid vanishing gradients (Zhang et al., 2016) , and recurrent dropout to avoid over-fitting (Gal and Ghahramani, 2016) . The output is then put into a decoding step by using dynamic programming to select the tag sequence with maximum score that satisfies the BIOES constraints. The de-coding step does not add more complexity to the training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 108, |
|
"text": "He et al. (2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 301, |
|
"text": "(Zhang et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 374, |
|
"text": "(Gal and Ghahramani, 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Detection", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We produce type candidates for mentions by using a combination of supervised, unsupervised and lookup approaches.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Typing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Supervised Fiction Types. Given an entity mention and its textual context, we approach typing as multiclass classification problem. The mention representation is the average of all embeddings of tokens in the mention. The context representation is a combination of left and right context around the mention. The contexts are encoded by using BiL-STM models (Graves, 2012) and then put into attention layer to learn the weight factors (Shimaoka et al., 2017) . Mention and context representations are concatenated and passed to the final logistic regression layer with cross entropy loss function to predict the type candidates.", |
|
"cite_spans": [ |
|
{ |
|
"start": 357, |
|
"end": 371, |
|
"text": "(Graves, 2012)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 457, |
|
"text": "(Shimaoka et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Typing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Target Classes. There are two kinds of target classes: (i) general types -7 disjunct high-level WordNet types that we manually chose, mirroring existing coarse typing systems: living thing, location, organization, object, time, event, substance, (ii) top-performing typestypes from reference type systems. Due to a large number of types as well as insufficient training data, predicting all types in the type systems is not effective. Therefore, for each reference universe, we predict those types for which, on withheld test data, at least 0.8 F1-score was achieved. This results in an average of 75 types per reference universe.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Typing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Supervised Real-world Types. Although fictional universes contain fantasy contents, many of them reflect our real-world, for instance, House of Cards, a satire of American politics. Even fictional stories like Game of Thrones or Lord of the Rings contain types presented in real world, such as King or Battle. To leverage this overlap, we incorporate the Wikipedia-and news-trained typing model from Choi et al. (2018) , which is able to predict up to 10,331 real-world types.", |
|
"cite_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 418, |
|
"text": "Choi et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Typing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Unsupervised Typing. Along with supervised technique, we use a pattern-based method to extract type candidates which appear explicitly in contexts for mentions. We use 36 manually crafted Hearst-style patterns for type extraction (Seitner et al., 2016) . Moreover, from dependency parsing, a noun phrase can be considered as a type candidate if there exists a noun compound modifier (nn) between the noun phrase and the given mention. In the case of candidate types appearing in the mention itself, we extract the head word of the mention and consider it as a candidate if it appears as a noun in WordNet. For example, given the text Queen Cersei was the twentieth ruler of the Seven Kingdoms, queen and kingdom are type candidates for the mentions CERSEI and SEVEN KINGDOMS, respectively. KB Lookup. Using top-ranked universes from section 2.2 as basis for the lookup, we map entity mentions to entities in reference universes by using lexical matching. The types of entities in corresponding type systems then become type candidates for the given mentions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 230, |
|
"end": 252, |
|
"text": "(Seitner et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mention Typing", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Using multiple universes as reference and typing in long texts may produce incompatibilities in predictions. For example, SARUMAN, a wizard in Lord of the Rings can be predicted as a white walker using the model learnt from Game of Thrones. To resolve possible inconsistencies, we rely on a consolidation step that uses an integer linear programming (ILP) model. The model captures several constraints, including disjointness, hierarchical coherence, cardinality limit and soft correlations (Chu et al., 2020). ILP Model. Given an entity mention e with a list of type candidates with corresponding weights, a decision variable T i is defined for each type candidate t i . T i = 1 if e belongs to t i , otherwise, T i = 0. With the constraints mentioned above, the objective function is: maximize", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Type Consolidation", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "\u03b1 i T i * w i + (1 \u2212 \u03b1) i,j T i * T j * v ij subject to T i + T j \u2264 1 \u2200(t i , t j ) \u2208 D T i \u2212 T j \u2264 0 \u2200(t i , t j ) \u2208 H i T i \u2264 \u03b4", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Type Consolidation", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "where w i is the weight of the type candidate t i , \u03b1 is a hyper parameter, v ij is Pearson correlation coefficient between a type pair (t i , t j ), D is the set of disjoint type pairs, H is the set of (transitive) hyponym pairs (t i , t j ) -t i is the (transitive) hyponym of t j , and \u03b4 is the threshold for the cardinality limit. Figure 2 : ENTYFI Web interface.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 335, |
|
"end": 343, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Type Consolidation", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "The ENTYFI system is deployed online at https: //d5demos.mpi-inf.mpg.de/entyfi. A screencast video, which demonstrates ENTYFI, is also uploaded at https://youtu.be/g_ESaONagFQ. Input. The web interface allows users to enter a text as input. To give a better experience, we provide various sample texts from three different sources: Wikia, books and fan fiction 2 . With each source, users can try with either texts from Lord of the Rings and Game of Thrones or random texts, as well as some cross-overs between different universes written by fans. Output. Given an input text, users can choose different typing modules to run. The output is the input text marked by entity mentions and their predicted types. The system also shows the predicted types with their aggregate scores and the typing modules from which the types are extracted. Figure 2 shows an example input and output of the ENTYFI system. Typing module selector. ENTYFI includes several typing modules, among which users can choose. If only the real-world typing module is chosen, the system runs typing on the text immediately, using one of the existing typing models which are able to predict up to 112 real-world types (Shimaoka et al., 2017) or 10,331 types (Choi et al., 2018) . Note: If the later model is selected to run the real-world typing, it requires more time to load the pre-trained embeddings (Pennington et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1186, |
|
"end": 1209, |
|
"text": "(Shimaoka et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1226, |
|
"end": 1245, |
|
"text": "(Choi et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1372, |
|
"end": 1397, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 838, |
|
"end": 846, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Web Interface", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "On the other hand, if supervised fiction typing or KB lookup typing are chosen, the system computes the similarity between the given text and reference universes from the database. With the default option, the type system of the most related universe is being used as targets for typing, while with the alternative case, users can choose different universes and use their type systems as targets. Users are also able to decide whether the consolidation step is executed or not.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Web Interface", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Exploration of reference universes. ENTYFI builds on 205 automatically induced high-quality type systems for popular fictional domains. Along with top 5 most relevant universes showing up with similarity scores, users can also choose other universes in the database. For a better overview, with each universe, we provide a short description about the universe and a hyperlink to its Wikia source. Figure 3 show an example of reference universes presented in the demonstration.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 397, |
|
"end": 405, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Web Interface", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Logs. To help users understand how the system works inside, we provide a log box that shows which step is running at the backend, step by step, along with timing information (Figure 4) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 184, |
|
"text": "(Figure 4)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Web Interface", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "A Song of Ice and Fire is a series of epic fantasy novels written by American novelist and screenwriter George R.R. Martin. The story of A Song of Ice and Fire takes place in a fictional world, primarily upon a continent called Westeros but also on a large landmass to the east, known as Essos. Most of the characters are human but as the series progresses other races are introduced, such as the cold and menacing Others from the far North and fire-breathing dragons from the East, both races thought to be extinct. There are three principal storylines in the series...", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Web Interface", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Adding More Universes Figure 3 : ENTYFI Reference Universes.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 30, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Link to Wikia", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A common use of entity typing is as building block of more comprehensive NLP pipelines that perform tasks such as entity linking, relation extraction or question answering. We envision that ENTYFI could strengthen such pipelines considerably (see also extrinsic evaluation in (Chu et al., 2020) ). Yet to illustrate its workings in isolation, in the following, we present a direct expert end-user application of entity typing in fictional texts.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 294, |
|
"text": "(Chu et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Demonstration Experience", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Suppose a literature analyst is doing research on a collection of unfamiliar short stories from fanfiction.net. Their goal is to understand the setting of each story, to answer questions such as what the stories are about (e.g. politics or supernatural), what types of characters the authors create, finding all instances of a type or a combination of types (e.g. female elves) or to do further analysis like if female elves are more frequent than male elves and if there are patterns regarding where female villains appear mostly. Due to time constraints, the analyst cannot read all of stories manually. Instead of that, they can run ENTYFI on each story to extract the entity type system automatically. For instance, to analyze the story Time Can't Heal Wounds Like These 3 , the analyst would paste the introduction of the story into the web interface of ENTYFI.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Demonstration Experience", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\"Elladan and Elrohir are captured along with their mother, and in the pits below the unforgiving Redhorn one twin finds his final resting place. In a series of devastating events Imladris loose one of its princes and its lady. But everything is not over yet, and those left behind must lean to cope and fight on.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Demonstration Experience", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "3 https://www.fanfiction.net/s/13484688/1/Time-Can-t-Heal-Wounds-Like-These Since they have no prior knowledge on the setting, they could let ENTYFI propose related universes for typing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Demonstration Experience", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "After computing the similarity between the input and the reference universes from the database, ENTYFI would then propose The Lord of the Rings, Vampires Diaries, Kid Icarus, Twin Peaks and Crossfire as top 5 reference universes, respectively. The analyst may consider The Lord of the Rings and Vampires Diaries, top 2 in ranking, of particular interest, and in addition, select the universe Forgotten Realms, because that is influential in their literary domain. The analyst would then run ENTYFI with default settings, and get a list of entities with their predicted types as results. They could then see that ELLADAN and ELROHIR are recognized as living thing, elves, hybrid people and characters, while REDHORN as living thing, villains, servants of morgoth, and IMLADRIS as location, kingdoms, landforms and elven cities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Demonstration Experience", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "They could then decide to rerun the analysis with reference universes The Lord of the Rings and Vampires Diaries but without running type consolidation. By ignoring this module, the number of predicted types for each entity increases. Especially, ELLADAN & EHROHIR now are classified as living thing, elves, characters, but also location and organization. Similarly, REDHORN belongs to both living thing and places, while IMLADRIS is both a kingdom and a devastating event. Apparently, these incompatibilities in predictions appear when the system does not run type consolidation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Demonstration Experience", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The analyst may wonder how the system performs when no reference universe is being used. By only selecting the real-world typing module (Choi et al., 2018) , the predicted types for EL- Imladris kingdoms, location, realms, landforms, places, elven cities, eriador, elven realms, mordor, etc. kingdoms, location, realms, arda, landforms, places, continents, organization, elven cities, etc. city, writing, setting, castle, clan, location, character, eleven, etc. The results show not only incompatible predictions, but also that the existing typing model in the real world domain lacks coverage on fictional domains. By using a database of fictional universes as reference, ENTYFI is able to fill these gaps, predict fictional types in a fine-grained level and remove incompatibilities in the final results. From this interaction, the literature analyst could conclude that the story is much related to The Lord of the Rings, which might help them to draw parallels and direct further manual investigations. Table 1 shows the result of this demonstration experience in details.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 155, |
|
"text": "(Choi et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 389, |
|
"text": "Imladris kingdoms, location, realms, landforms, places, elven cities, eriador, elven realms, mordor, etc. kingdoms, location, realms, arda, landforms, places, continents, organization, elven cities, etc.", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1007, |
|
"end": 1015, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Demonstration Experience", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Earliest approaches for entity typing are based on manually designed patterns (e.g., Hearst patterns (Hearst, 1992)) to extract explicit type candidates in given texts. These pattern-based approaches can achieve good precision, but their recall is low, and they are difficult to scale up. Traditional named-entity recognition methods used both rule-based and supervised techniques to recognize and assign entity mentions into few coarse classes like person, location and organization (Sang and De Meulder, 2003; Finkel et al., 2005; Collobert et al., 2011; Lample et al., 2016) . Recently, fine-grained namedentity recognition and typing are getting more attention (Ling and Weld, 2012; Corro et al., 2015; Shimaoka et al., 2017; Choi et al., 2018) . Ling and Weld (2012) use a classic linear classifier to classify the mentions into a set of 112 types. At much larger scale, FINET (Corro et al., 2015) uses 16k types from the WordNet taxonomy as the targets for entity typing. FINET is a combination of pattern-based, mention-based and verb-based extractors to extract both explicit and implicit type candidates for the mentions from the contexts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 484, |
|
"end": 511, |
|
"text": "(Sang and De Meulder, 2003;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 512, |
|
"end": 532, |
|
"text": "Finkel et al., 2005;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 556, |
|
"text": "Collobert et al., 2011;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 577, |
|
"text": "Lample et al., 2016)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 665, |
|
"end": 686, |
|
"text": "(Ling and Weld, 2012;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 687, |
|
"end": 706, |
|
"text": "Corro et al., 2015;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 707, |
|
"end": 729, |
|
"text": "Shimaoka et al., 2017;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 730, |
|
"end": 748, |
|
"text": "Choi et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 751, |
|
"end": 771, |
|
"text": "Ling and Weld (2012)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 876, |
|
"end": 902, |
|
"text": "FINET (Corro et al., 2015)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "With the development of deep learning, many neural methods have been proposed (Dong et al., 2015; Shimaoka et al., 2017; Choi et al., 2018; Xu et al., 2018) . Shimaoka et al. (2017) propose a neural network with LSTM and attention mechanisms to encode representations of a mention's contexts. Recently, Choi et al. (2018) use distant supervision to collect a training dataset which includes over 10k types. The model is trained with a multi-task objective function that aims to classify entity mentions into three levels: general (9 types), fine-grained (112 types) and ultra-fine (10201 types).", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 97, |
|
"text": "(Dong et al., 2015;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 98, |
|
"end": 120, |
|
"text": "Shimaoka et al., 2017;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 139, |
|
"text": "Choi et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 156, |
|
"text": "Xu et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 159, |
|
"end": 181, |
|
"text": "Shimaoka et al. (2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 321, |
|
"text": "Choi et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "While most existing methods focus on entity mentions with single contexts (e.g. a sentence), ENTYFI attempts to work on long texts (e.g., a chapter of a book). By proposing a combination of supervised and unsupervised approaches, with a following consolidation step, ENTYFI is able to predict types for entity mentions based on different contexts, without producing incompatibilities in predictions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Many web demo systems for entity typing have been built, such as Stanford NER 4 , displaCy NER 5 and AllenNLP 6 . However, these systems all predict only a few coarse and real world types (4-16 types). ENTYFI is the first attempt to entity typing at a fine-grained level for fictional texts. In a related problem, the richness of Wikia has been utilized for entity linking and question answering (Gao and Cucerzan, 2017; Maqsud et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 396, |
|
"end": 420, |
|
"text": "(Gao and Cucerzan, 2017;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 441, |
|
"text": "Maqsud et al., 2014)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We have presented ENTYFI, an illustrative demonstration system for domain-specific and long-tail typing. We hope ENTYFI will prove useful both to language and cultural research, and to NLP researchers interested in understanding the challenges and opportunities in long-tail typing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://www.fanfiction.net/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://nlp.stanford.edu:8080/ner/ 5 https://explosion.ai/demos/displacy-ent 6 https://demo.allennlp.org/named-entity-recognition", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Coupled semi-supervised learning for information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Carlson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Betteridge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Richard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom M", |
|
"middle": [], |
|
"last": "Estevam R Hruschka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "WSDM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Carlson, Justin Betteridge, Richard C Wang, Estevam R Hruschka Jr, and Tom M Mitchell. 2010. Coupled semi-supervised learning for information extraction. In WSDM.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Ultra-fine entity typing", |
|
"authors": [ |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eunsol Choi, Omer Levy, Yejin Choi, and Luke Zettle- moyer. 2018. Ultra-fine entity typing. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "TiFi: Taxonomy induction for fictional domains", |
|
"authors": [ |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Cuong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Razniewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The Web Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cuong Xuan Chu, Simon Razniewski, and Gerhard Weikum. 2019. TiFi: Taxonomy induction for fic- tional domains. In The Web Conference.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "ENTYFI: Entity typing in fictional texts", |
|
"authors": [ |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Cuong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Razniewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "WSDM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cuong Xuan Chu, Simon Razniewski, and Gerhard Weikum. 2020. ENTYFI: Entity typing in fictional texts. In WSDM.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Natural language processing (almost) from scratch", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e9on", |
|
"middle": [], |
|
"last": "Bottou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Karlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Kuksa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "JMLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. In JMLR.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Finet: Context-aware fine-grained named entity typing", |
|
"authors": [ |
|
{ |
|
"first": "Luciano", |
|
"middle": [], |
|
"last": "Del Corro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdalghani", |
|
"middle": [], |
|
"last": "Abujabal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rainer", |
|
"middle": [], |
|
"last": "Gemulla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luciano del Corro, Abdalghani Abujabal, Rainer Gemulla, and Gerhard Weikum. 2015. Finet: Context-aware fine-grained named entity typing. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A hybrid neural model for type classification of entity mentions", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Dong, Furu Wei, Hong Sun, Ming Zhou, and Ke Xu. 2015. A hybrid neural model for type classification of entity mentions. In IJCAI.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Incorporating non-local information into information extraction systems by gibbs sampling", |
|
"authors": [ |
|
{ |
|
"first": "Jenny", |
|
"middle": [ |
|
"Rose" |
|
], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trond", |
|
"middle": [], |
|
"last": "Grenager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jenny Rose Finkel, Trond Grenager, and Christopher Manning. 2005. Incorporating non-local informa- tion into information extraction systems by gibbs sampling. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A theoretically grounded application of dropout in recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Yarin", |
|
"middle": [], |
|
"last": "Gal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zoubin", |
|
"middle": [], |
|
"last": "Ghahramani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
    "venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yarin Gal and Zoubin Ghahramani. 2016. A theoret- ically grounded application of dropout in recurrent neural networks. In NIPS.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Entity linking to one thousand knowledge bases", |
|
"authors": [ |
|
{ |
|
"first": "Ning", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silviu", |
|
"middle": [], |
|
"last": "Cucerzan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ECIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ning Gao and Silviu Cucerzan. 2017. Entity linking to one thousand knowledge bases. In ECIR.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Supervised sequence labelling", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Graves", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Supervised sequence labelling with recurrent neural networks", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Graves. 2012. Supervised sequence labelling. In Supervised sequence labelling with recurrent neural networks.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Deep semantic role labeling: What works and what's next", |
|
"authors": [ |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luheng He, Kenton Lee, Mike Lewis, and Luke Zettle- moyer. 2017. Deep semantic role labeling: What works and what's next. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Automatic acquisition of hyponyms from large text corpora", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Marti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hearst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marti A Hearst. 1992. Automatic acquisition of hy- ponyms from large text corpora. In COLING.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Neural architectures for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandeep", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuya", |
|
"middle": [], |
|
"last": "Kawakami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "NAACL HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Miguel Ballesteros, Sandeep Sub- ramanian, Kazuya Kawakami, and Chris Dyer. 2016. Neural architectures for named entity recognition. In NAACL HLT.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Finegrained named entity recognition using conditional random fields for question answering", |
|
"authors": [ |
|
{ |
|
"first": "Changki", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi-Gyu", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyo-Jung", |
|
"middle": [], |
|
"last": "Oh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soojong", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeong", |
|
"middle": [], |
|
"last": "Heo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chung-Hee", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyeon-Jin", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji-Hyun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myung-Gil", |
|
"middle": [], |
|
"last": "Jang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Asia Information Retrieval Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Changki Lee, Yi-Gyu Hwang, Hyo-Jung Oh, Soojong Lim, Jeong Heo, Chung-Hee Lee, Hyeon-Jin Kim, Ji-Hyun Wang, and Myung-Gil Jang. 2006. Fine- grained named entity recognition using conditional random fields for question answering. In Asia Infor- mation Retrieval Symposium.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Fine-grained entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
    "first": "Daniel S", |
 |
    "middle": [], |
 |
    "last": "Weld", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiao Ling and Daniel S Weld. 2012. Fine-grained en- tity recognition. In AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Nerdle: Topic-specific question answering using wikia seeds", |
|
"authors": [ |
|
{ |
|
"first": "Umar", |
|
"middle": [], |
|
"last": "Maqsud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Arnold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "H\u00fclfenhaus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Akbik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Umar Maqsud, Sebastian Arnold, Michael H\u00fclfenhaus, and Alan Akbik. 2014. Nerdle: Topic-specific ques- tion answering using wikia seeds. In COLING.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word rep- resentation. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The life and death of discourse entities: Identifying singleton mentions", |
|
"authors": [ |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Catherine", |
|
"middle": [], |
|
"last": "De Marneffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marta Recasens, Marie-Catherine de Marneffe, and Christopher Potts. 2013. The life and death of dis- course entities: Identifying singleton mentions. In NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
    "title": "Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Erik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F Sang and Fien De Meulder. 2003. Introduc- tion to the CoNLL-2003 shared task: Language- independent named entity recognition. arXiv.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A large database of hypernymy relations extracted from the web", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Seitner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Bizer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Eckert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Faralli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Meusel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heiko", |
|
"middle": [], |
|
"last": "Paulheim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Seitner, Christian Bizer, Kai Eckert, Stefano Faralli, Robert Meusel, Heiko Paulheim, and Si- mone Paolo Ponzetto. 2016. A large database of hy- pernymy relations extracted from the web. In LREC.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Neural architectures for fine-grained entity type classification", |
|
"authors": [ |
|
{ |
|
"first": "Sonse", |
|
"middle": [], |
|
"last": "Shimaoka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pontus", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Inui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "EACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sonse Shimaoka, Pontus Stenetorp, Kentaro Inui, and Sebastian Riedel. 2017. Neural architectures for fine-grained entity type classification. In EACL.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Metic: Multi-instance entity typing from corpus", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luyang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanghua", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deqing", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Xu, Zheng Luo, Luyang Huang, Bin Liang, Yanghua Xiao, Deqing Yang, and Wei Wang. 2018. Metic: Multi-instance entity typing from corpus. In CIKM.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Highway long short-term memory rnns for distant speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guoguo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaisheng", |
|
"middle": [], |
|
    "last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
    "raw_text": "Yu Zhang, Guoguo Chen, Dong Yu, Kaisheng Yao, Sanjeev Khudanpur, and James Glass. 2016. High- way long short-term memory rnns for distant speech recognition. In ICASSP.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "ENTYFI Logs.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Results of ENTYFI on different settings. & ELROHIR would change to athlete, god, body part, arm, etc. REDHORN now becomes a city, god, tribe and even an act, while IMLADRIS is a city, writing, setting and castle.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |