ACL-OCL / Base_JSON /prefixT /json /textgraphs /2021.textgraphs-1.14.json
Benjamin Aw
Add updated pkl file v3
6fa4bc9
{
"paper_id": "2021",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T07:58:06.255560Z"
},
"title": "Fine-grained General Entity Typing in German using GermaNet",
"authors": [
{
"first": "Sabine",
"middle": [],
"last": "Weber",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Edinburgh",
"location": {}
},
"email": "[email protected]"
},
{
"first": "Mark",
"middle": [],
"last": "Steedman",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Edinburgh",
"location": {}
},
"email": "[email protected]"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Fine-grained entity typing is important to tasks like relation extraction and knowledge base construction. We find however, that fine-grained entity typing systems perform poorly on general entities (e.g. \"ex-president\") as compared to named entities (e.g. \"Barack Obama\"). This is due to a lack of general entities in existing training data sets. We show that this problem can be mitigated by automatically generating training data from WordNets. We use a German WordNet equivalent, GermaNet, to automatically generate training data for German general entity typing. We use this data to supplement named entity data to train a neural fine-grained entity typing system. This leads to a 10% improvement in accuracy of the prediction of level 1 FIGER types for German general entities, while decreasing named entity type prediction accuracy by only 1%.",
"pdf_parse": {
"paper_id": "2021",
"_pdf_hash": "",
"abstract": [
{
"text": "Fine-grained entity typing is important to tasks like relation extraction and knowledge base construction. We find however, that fine-grained entity typing systems perform poorly on general entities (e.g. \"ex-president\") as compared to named entities (e.g. \"Barack Obama\"). This is due to a lack of general entities in existing training data sets. We show that this problem can be mitigated by automatically generating training data from WordNets. We use a German WordNet equivalent, GermaNet, to automatically generate training data for German general entity typing. We use this data to supplement named entity data to train a neural fine-grained entity typing system. This leads to a 10% improvement in accuracy of the prediction of level 1 FIGER types for German general entities, while decreasing named entity type prediction accuracy by only 1%.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "The task of fine-grained entity typing is to assign a semantic label (e.g. '/person/politician' or '/location/city') to an entity in a natural language sentence. In contrast to coarse grained entity typing it uses a larger set of types (e.g. 112 types in the FIGER ontology (Ling and Weld, 2012) ), and a multilevel type hierarchy. An example of fine grained entity typing can be seen in Figure 1 . Fine-grained entity typing is an important initial step in context sensitive tasks such as relation extraction (Kuang et al., 2020) , question answering (Yavuz et al., 2016) and knowledge base construction (Hosseini et al., 2019) .",
"cite_spans": [
{
"start": 274,
"end": 295,
"text": "(Ling and Weld, 2012)",
"ref_id": "BIBREF10"
},
{
"start": 510,
"end": 530,
"text": "(Kuang et al., 2020)",
"ref_id": "BIBREF8"
},
{
"start": 552,
"end": 572,
"text": "(Yavuz et al., 2016)",
"ref_id": "BIBREF17"
},
{
"start": 605,
"end": 628,
"text": "(Hosseini et al., 2019)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [
{
"start": 388,
"end": 396,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Entities can appear in text in many forms. In the sentences 'Barack Obama visited Hawaii. The ex-president enjoyed the fine weather.' both 'Barack Obama' and 'ex-president' should be assigned the type '/person/politician' by a fine-grained entity typing system. While the typing of the named entity (NE) 'Barack Obama' can be performed by state of the art entity typing systems, it is unclear how well these systems perform on general entities (GEs) like 'ex-president'. We find that accuracy and F1 score of a state-of-the-art German fine-grained entity typing system are 17% lower on general entities than on named entities (see Table 1 and section 5). This is because the training data for these systems contains only named entities, but not general entities (e.g. Weber and Steedman (2021, under submission) ; Ling and Weld (2012) ). This is the problem we address with our approach.",
"cite_spans": [
{
"start": 768,
"end": 811,
"text": "Weber and Steedman (2021, under submission)",
"ref_id": null
},
{
"start": 814,
"end": 834,
"text": "Ling and Weld (2012)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [
{
"start": 631,
"end": 638,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Because manual annotation of training data is costly and time intensive we propose an approach that uses existing resources to create silver annotated GE typing data. For this we use German text taken from Wikipedia, GermaNet (a German WordNet equivalent, Hamp and Feldweg (1997) ) and the FIGER type ontology (Ling and Weld, 2012) . The resulting data can be added to existing NE typing data for the training of a neural entity typing system. In our approach we use the hierarchical typing model of Chen et al. (2020) , which builds upon contextualized word embeddings. It has shown good performance on public benchmarks and is freely available.",
"cite_spans": [
{
"start": 256,
"end": 279,
"text": "Hamp and Feldweg (1997)",
"ref_id": "BIBREF5"
},
{
"start": 310,
"end": 331,
"text": "(Ling and Weld, 2012)",
"ref_id": "BIBREF10"
},
{
"start": 500,
"end": 518,
"text": "Chen et al. (2020)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We compare our approach against using only NE data for training and a rule-based approach and achieve 10% improvement in accuracy of the prediction of level 1 FIGER types for German general entities, while decreasing named entity prediction accuracy by only 1%. Our approach can be seen as a proof of concept and a blueprint for the use of existing WordNet resources to improve entity typing quality in other languages and domains.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The problem of GE typing performance has not been examined specifically before, nor has it been addressed for the case of German. Choi et al. (2018) create a fine-grained entity typing system that is capable of typing both GE and NE in English by integrating GEs into their training data. Their approach relies on large amounts of manually annotated data, and is therefore not feasible for our case. Moreover they propose a new type hierarchy, while we stick to the widely used FIGER type hierarchy, to make the output of our system consistent with that of other systems for tasks like multilingual knowledge graph construction.",
"cite_spans": [
{
"start": 130,
"end": 148,
"text": "Choi et al. (2018)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "Recent advances in typing NE in English have harnessed the power of contextualized word embeddings (Peters et al., 2018; Conneau et al., 2020) to encode entities and their context. These approaches use the AIDA, BNN, OntoNotes and FIGER ontologies, which come with their own human annotated data sets (Chen et al., 2020; Dai et al., 2019; L\u00f3pez et al., 2019) . By choosing to use the model of (Chen et al., 2020), we build upon their strengths to enable GE typing in German.",
"cite_spans": [
{
"start": 99,
"end": 120,
"text": "(Peters et al., 2018;",
"ref_id": "BIBREF12"
},
{
"start": 121,
"end": 142,
"text": "Conneau et al., 2020)",
"ref_id": "BIBREF2"
},
{
"start": 301,
"end": 320,
"text": "(Chen et al., 2020;",
"ref_id": "BIBREF0"
},
{
"start": 321,
"end": 338,
"text": "Dai et al., 2019;",
"ref_id": "BIBREF3"
},
{
"start": 339,
"end": 358,
"text": "L\u00f3pez et al., 2019)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "German NE typing suffers from a lack of manually annotated resources. Two recent approaches by Ruppenhofer et al. (2020) and Leitner et al. (2020) use manually annotated data from biographic interviews and court proceedings. Owing to the specific domains, the authors modify existing type ontologies (OntoNotes in the case of biographic interviews) or come up with their own type ontology (in the case of court proceedings). This limits the way their models can be applied to other domains or used for multilingual tasks. Weber and Steedman (2021, under submission) use annotation projection to create a training data set of Wikipedia text annotated with FIGER types. We build upon their data set to create a German model that types both NEs and GEs.",
"cite_spans": [
{
"start": 95,
"end": 120,
"text": "Ruppenhofer et al. (2020)",
"ref_id": "BIBREF13"
},
{
"start": 125,
"end": 146,
"text": "Leitner et al. (2020)",
"ref_id": "BIBREF9"
},
{
"start": 522,
"end": 565,
"text": "Weber and Steedman (2021, under submission)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "GermaNet (Hamp and Feldweg, 1997 ) is a broad-coverage lexical-semantic net for German which contains 16.000 words and is modelled after the English WordNet (Fellbaum, 2010) . The net contains links that connect nouns to their hyponyms and hypernyms. This way GermaNet implicitly contains a fine-grained ontology of nouns. Although some NE are contained in GermaNet, the vast majority of nouns are GEs.",
"cite_spans": [
{
"start": 9,
"end": 32,
"text": "(Hamp and Feldweg, 1997",
"ref_id": "BIBREF5"
},
{
"start": 157,
"end": 173,
"text": "(Fellbaum, 2010)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Method",
"sec_num": "3"
},
{
"text": "We manually map the 112 FIGER types to nouns in GermaNet. Starting from a German translation of the type name (e.g. the type 'person' translates to 'Mensch') we add terms that best describe the FIGER type. This mapping enables us to look up a word in GermaNet and check if any of its hypernyms are mapped to a FIGER type. If this is the case, we can assign the corresponding FIGER type to the word in question. Figure 2 illustrates this method. We use this method to generate German GE training data and as our rule-based baseline.",
"cite_spans": [],
"ref_spans": [
{
"start": 411,
"end": 419,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Method",
"sec_num": "3"
},
{
"text": "We use this GE training data in addition to German NE typing data to train the hierarchical typing model of Chen et al. (2020) . In this model the entity and its context are encoded using XLM-RoBERTa (Conneau et al., 2020) . For each type in the FIGER ontology the model learns a type embedding. We pass the concatenated entity and context vector through a 2-layer feed-forward network that maps into the same space as the type embedding. The score is an inner product between the transformed entity and context vector and the type embedding. For further model details refer to Chen et al. (2020) .",
"cite_spans": [
{
"start": 108,
"end": 126,
"text": "Chen et al. (2020)",
"ref_id": "BIBREF0"
},
{
"start": 200,
"end": 222,
"text": "(Conneau et al., 2020)",
"ref_id": "BIBREF2"
},
{
"start": 578,
"end": 596,
"text": "Chen et al. (2020)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Method",
"sec_num": "3"
},
{
"text": "As a NE training set we use the German finegrained entity typing corpus of Weber and Steedman (2021, under submission) . This data set was generated from the WikiMatrix corpus by Schwenk et al. (2019) using annotation projection.",
"cite_spans": [
{
"start": 75,
"end": 118,
"text": "Weber and Steedman (2021, under submission)",
"ref_id": null
},
{
"start": 179,
"end": 200,
"text": "Schwenk et al. (2019)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental setup 4.1 Data sets",
"sec_num": "4"
},
{
"text": "To create the GE training data, we use the German portion of the WikiMatrix corpus. By using the same genre we make sure that no additional noise is added by domain differences. Moreover, the original English FIGER data set was created from Wikipedia text, so we can assume that all FIGER types are well represented in the WikiMatrix data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental setup 4.1 Data sets",
"sec_num": "4"
},
{
"text": "To generate GE training data we take the following steps: First, we split off 100 K sentences from the top of the German part of the WikiMatrix corpus. We use spaCy (Honnibal et al., 2020) for part of speech tagging. Every word tagged as a noun is looked up in GermaNet. We use the method described in Section 3 to assign FIGER types to the noun.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "GE training data generation",
"sec_num": "4.2"
},
{
"text": "This lookup in GermaNet is not context-aware, so polysemous words are assigned multiple contradicting types. We only include words in our training data that have less than two level 1 types and not more than one level 2 type. This filter discards about 41% of all input words. We discuss the implications of this filter in Section 6. The resulting corpus consists of 200K sentences of German FIGER typed GE data 1 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "GE training data generation",
"sec_num": "4.2"
},
{
"text": "In our experiments we compare six different training setups against a rule-based baseline using only GermaNet.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training set up",
"sec_num": "4.3"
},
{
"text": "Only NE data: In this setup we train the hierarchical typing model on 200K sentences taken from the German fine-grained NE typing corpus by Weber and Steedman (2021, under submission) .",
"cite_spans": [
{
"start": 140,
"end": 183,
"text": "Weber and Steedman (2021, under submission)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Training set up",
"sec_num": "4.3"
},
{
"text": "Mixing NE and GE data: In this setup we add either 20K, 40K, 60K, 80K or 100K sentences of automatically generated GE training data to 200K sentences taken from the corpus of Weber and Steedman (2021, under submission) and train the hierarchical typing model on it. We shuffle the sentence order before training.",
"cite_spans": [
{
"start": 175,
"end": 218,
"text": "Weber and Steedman (2021, under submission)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Training set up",
"sec_num": "4.3"
},
{
"text": "Baseline: We compare these two neural approaches against using only GermaNet. In this baseline we use the approach described in Section 3 and Figure 2 to type our test data.",
"cite_spans": [],
"ref_spans": [
{
"start": 142,
"end": 150,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Training set up",
"sec_num": "4.3"
},
{
"text": "Metrics Following previous fine-grained entity typing literature we evaluate the results of our model using strict accuracy (Acc) and micro F1 score. The strict accuracy is the ratio of instances where the predicted type set is exactly the same as the gold type set. The micro F1 score computes F1 score biased by class frequency. We also evaluate per hierarchy level accuracy (level 1 type labels being more coarse grained and level 2 labels more fine grained).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "4.4"
},
{
"text": "Test sets We use the German NE typing test set of Weber and Steedman (2021, under submission) for testing the performance of our systems on the task of NE typing. The test set consists of 500 manually annotated sentences.",
"cite_spans": [
{
"start": 50,
"end": 93,
"text": "Weber and Steedman (2021, under submission)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "4.4"
},
{
"text": "We create our GE typing data sets by taking that same test set and manually replacing the named entities in it with plausible general entities (e.g. swapping 'Barack Obama' for 'ex-president'). Where this was not possible, we chose another noun from the sentence and manually added the correct type. In all other cases we removed the sentence from the data set. The resulting GE data set consists of 400 sentences, which we split into a 100 sentence development set and a 300 sentence test set. Table 1 shows the accuracy and F1 scores on the gold German test set. Additionally, development set results are presented in appendix A. We compare the performance of models trained with different amounts of GE data on the GE and NE test sets described in section 4.4.",
"cite_spans": [],
"ref_spans": [
{
"start": 495,
"end": 502,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "4.4"
},
{
"text": "The test set performance on NE is best when no GE data is added, but GE performance is at its lowest. After adding 20K sentences of GE training data the level 1 accuracy and F1 score on the GE test set rises by 9%. Increasing the amount of GE training data to 40K improves the GE test set performance further with best level 1 results at 40K sentences GE data and best level 2 results at 60K sentences GE data. Adding more GE data beyond these points decreases GE performance. Acc L2 F1 L2 Model NE GE NE GE NE GE NE GE 200K (only NE) Although NE performance is worsened by adding GE training data, the decrease in level 1 performance in both accuracy and F1 is only 1% for 20K and 40K GE sentences, with a maximum decrease of 3% when 100K GE sentences are added.",
"cite_spans": [],
"ref_spans": [
{
"start": 477,
"end": 545,
"text": "Acc L2 F1 L2 Model NE GE NE GE NE GE NE GE 200K (only NE)",
"ref_id": null
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "5"
},
{
"text": "Adding GE training data has a smaller effect on level 2 performance than on level 1 performance, with level 2 accuracy and F1 on the GE test set increasing by 5% when 60K sentences of GE data are added. Adding GE training data initially decreases performance on NE level 2 types, but at 60K sentences of GE data is just as good as without them.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acc L1 F1 L1",
"sec_num": null
},
{
"text": "Adding more than 60K sentences of GE data does not improve GE test set performance, but decreases both NE and GE test set performance in accuracy and F1 score. We can also see that the GermaNet baseline is outperformed by all systems, although its performance on level 2 GE types is close to our best models. We will discuss possible explanations in the next section.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acc L1 F1 L1",
"sec_num": null
},
{
"text": "The results show that the models' performance on GE typing can be improved using a simple data augmentation method using WordNet, while only lightly impacting the performance on NE typing.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "6"
},
{
"text": "All neural models outperform the GermaNet baseline. This raises the question why the neural systems were able to perform better than GermaNet on GE, although the training data was generated from GermaNet. We speculate that the hierarchical typing model is very context sensitive because of its usage of contextualized word embeddings (XLM-RoBERTa) to encode entities and their context during training. While our GE data provides it with high confidence non-polysemous examples, it is able to learn which context goes with which type.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "6"
},
{
"text": "At test time this awareness of context enables the neural systems to disambiguate polysemous cases, even though it has not observed these cases at training time. This intuition is supported by our test results: For the best performing model (240K) 40% of the general entities that occur in our test set are never seen in the training data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "6"
},
{
"text": "A second reason why the neural models outperform GermaNet is that GermaNet does not represent every German noun. A certain word might not be part of GermaNet and therefore no type can be assigned. This is the case for 23% of words seen during training data generation. The neural models do not have this problem because our vocabulary is larger than the 16.000 words contained in GermaNet and because the neural models assign type labels to out of vocabulary words on the basis of the language model XLM-RoBERTa.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "6"
},
{
"text": "Despite these factors the neural models' performance is closely matched by the GermaNet baseline on level 2 labels. Level 2 types are underrepresented in the data, because their prevalence follows their occurrence in the Wikipedia data. This leads to some low-level types being very rare: a signal that is too weak to be learned sufficiently by a neural model. On the other hand, a lookup of words in a preexisting data base like GermaNet is not affected by this issue. While the neural models offer high recall at low precision, GermaNet has higher precision at low recall.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "6"
},
{
"text": "The results also show that 20K sentences of GE data produce the highest increase of GE performance while impacting NE performance least. Adding GE data beyond 60K sentences does not only worsen NE performance but also GE performance. This is due to noise in the GE typing data. A manual error analysis of 100 GE training data sentences shows that 35% have incorrect type assignments. With more GE training data the model starts to overfit to this noise, which leads to decreasing test set performance, affecting NE performance slightly more than GE performance.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "6"
},
{
"text": "In this paper we have shown that it is possible to improve the performance of a German fine-grained entity typing system using GermaNet. We create silver annotated general entity typing data for training a fine-grained entity typing model that builds upon contextualised word embeddings (in our case, XLM-RoBERTa). Our results can be taken as a blueprint for improving fine-grained entity typing performance in other languages and domains, as there are WordNets for over 40 different languages. Moreover, the manual mapping we introduced could be replaced by machine-translating English type labels into the language of the WordNet, which would require less resources for human annotation than a manual mapping.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion and future work",
"sec_num": "7"
},
{
"text": "Avenues for future work could be a combination between high-precision but low recall WordNets and neural models, e.g. through incorporating the models' prediction confidence to make a decision whether a WordNet look-up should be trusted over the models' own prediction.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion and future work",
"sec_num": "7"
},
{
"text": "The problem of general entity typing could also be viewed through the lens of coreference resolution: The type of a general entity could be inferred from a named entity that the general entity refers to. However, there might be cases in which no named entity referent exists, or domains and languages where coreference resolution systems are unavailable. In all of these cases combining our method with existing approaches opens new possibilities.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion and future work",
"sec_num": "7"
},
{
"text": "The generation code and generated data can be found here: https://github.com/webersab/german_general_entity_typing",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "Development set results can be seen in Table 2 . We use the development set to determine which amount of added GEs achieves the best result. The exact amount of GEs necessary for an ideal result might vary depending on the fine-grained entity typing model and the NE data used. The development set enables the user to determine this amount for their individual application. Best development set performance aligns with best test set performance on Level 1 metrics, and is only off by 1% for Level 2 metrics.",
"cite_spans": [],
"ref_spans": [
{
"start": 39,
"end": 46,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "A Development set results",
"sec_num": null
},
{
"text": "In keeping with the NAACL reproducibility guidelines we report the following implementation details of our model: We trained all models using a single GeForce RTX 2080 Ti GPU. Training each of the models took under an hour. The number of model parameters is 50484362. All hyperparameters of the model were taken from the implementation of Chen et al. (2020) . All additional code used and all of our data sets are available on github.com/webersab/german_general_entity_typing.",
"cite_spans": [
{
"start": 339,
"end": 357,
"text": "Chen et al. (2020)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "B Reproducibility",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Hierarchical entity typing via multi-level learning to rank",
"authors": [
{
"first": "Tongfei",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Yunmo",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Van Durme",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
"volume": "2020",
"issue": "",
"pages": "8465--8475",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tongfei Chen, Yunmo Chen, and Benjamin Van Durme. 2020. Hierarchical entity typing via multi-level learning to rank. In Proceedings of the 58th Annual Meeting of the Association for Computational Lin- guistics, ACL 2020, Online, July 5-10, 2020, pages 8465-8475.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Ultra-fine entity typing",
"authors": [
{
"first": "Eunsol",
"middle": [],
"last": "Choi",
"suffix": ""
},
{
"first": "Omer",
"middle": [],
"last": "Levy",
"suffix": ""
},
{
"first": "Yejin",
"middle": [],
"last": "Choi",
"suffix": ""
},
{
"first": "Luke",
"middle": [],
"last": "Zettlemoyer",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics",
"volume": "1",
"issue": "",
"pages": "87--96",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Eunsol Choi, Omer Levy, Yejin Choi, and Luke Zettle- moyer. 2018. Ultra-fine entity typing. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 87-96.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Unsupervised cross-lingual representation learning at scale",
"authors": [
{
"first": "Alexis",
"middle": [],
"last": "Conneau",
"suffix": ""
},
{
"first": "Kartikay",
"middle": [],
"last": "Khandelwal",
"suffix": ""
},
{
"first": "Naman",
"middle": [],
"last": "Goyal",
"suffix": ""
},
{
"first": "Vishrav",
"middle": [],
"last": "Chaudhary",
"suffix": ""
},
{
"first": "Guillaume",
"middle": [],
"last": "Wenzek",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Francisco Guzm\u00e1n",
"suffix": ""
},
{
"first": "Myle",
"middle": [],
"last": "Grave",
"suffix": ""
},
{
"first": "Luke",
"middle": [],
"last": "Ott",
"suffix": ""
},
{
"first": "Veselin",
"middle": [],
"last": "Zettlemoyer",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Stoyanov",
"suffix": ""
}
],
"year": 2020,
"venue": "ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, E. Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In ACL.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Improving fine-grained entity typing with entity linking",
"authors": [
{
"first": "Hongliang",
"middle": [],
"last": "Dai",
"suffix": ""
},
{
"first": "Donghong",
"middle": [],
"last": "Du",
"suffix": ""
},
{
"first": "Xin",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Yangqiu",
"middle": [],
"last": "Song",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
"volume": "",
"issue": "",
"pages": "6211--6216",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hongliang Dai, Donghong Du, Xin Li, and Yangqiu Song. 2019. Improving fine-grained entity typing with entity linking. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 6211-6216.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Theory and applications of ontology: computer applications",
"authors": [
{
"first": "Christiane",
"middle": [],
"last": "Fellbaum",
"suffix": ""
}
],
"year": 2010,
"venue": "",
"volume": "",
"issue": "",
"pages": "231--243",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christiane Fellbaum. 2010. Wordnet. In Theory and applications of ontology: computer applications, pages 231-243. Springer.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Automatic information extraction and building of lexical semantic resources for NLP applications",
"authors": [
{
"first": "Birgit",
"middle": [],
"last": "Hamp",
"suffix": ""
},
{
"first": "Helmut",
"middle": [],
"last": "Feldweg",
"suffix": ""
}
],
"year": 1997,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Birgit Hamp and Helmut Feldweg. 1997. Germanet-a lexical-semantic net for german. In Automatic infor- mation extraction and building of lexical semantic resources for NLP applications.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "spaCy: Industrial-strength Natural Language Processing in Python",
"authors": [],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"DOI": [
"10.5281/zenodo.1212303"
]
},
"num": null,
"urls": [],
"raw_text": "Matthew Honnibal, Ines Montani, Sofie Van Lan- deghem, and Adriane Boyd. 2020. spaCy: Industrial-strength Natural Language Processing in Python.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Duality of link prediction and entailment graph induction",
"authors": [
{
"first": "Mohammad Javad",
"middle": [],
"last": "Hosseini",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Shay",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Cohen",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Johnson",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Steedman",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "4736--4746",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mohammad Javad Hosseini, Shay B Cohen, Mark Johnson, and Mark Steedman. 2019. Duality of link prediction and entailment graph induction. In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4736- 4746.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Improving neural relation extraction with implicit mutual relations",
"authors": [
{
"first": "Jun",
"middle": [],
"last": "Kuang",
"suffix": ""
},
{
"first": "Yixin",
"middle": [],
"last": "Cao",
"suffix": ""
},
{
"first": "Jianbing",
"middle": [],
"last": "Zheng",
"suffix": ""
},
{
"first": "Xiangnan",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "Ming",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "Aoying",
"middle": [],
"last": "Zhou",
"suffix": ""
}
],
"year": 2020,
"venue": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"volume": "",
"issue": "",
"pages": "1021--1032",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jun Kuang, Yixin Cao, Jianbing Zheng, Xiangnan He, Ming Gao, and Aoying Zhou. 2020. Improving neu- ral relation extraction with implicit mutual relations. In 2020 IEEE 36th International Conference on Data Engineering (ICDE), pages 1021-1032. IEEE.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "A dataset of german legal documents for named entity recognition",
"authors": [
{
"first": "Elena",
"middle": [],
"last": "Leitner",
"suffix": ""
},
{
"first": "Georg",
"middle": [],
"last": "Rehm",
"suffix": ""
},
{
"first": "Juli\u00e1n",
"middle": [],
"last": "Moreno-Schneider",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:2003.13016"
]
},
"num": null,
"urls": [],
"raw_text": "Elena Leitner, Georg Rehm, and Juli\u00e1n Moreno- Schneider. 2020. A dataset of german legal docu- ments for named entity recognition. arXiv preprint arXiv:2003.13016.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Fine-grained entity recognition",
"authors": [
{
"first": "Xiao",
"middle": [],
"last": "Ling",
"suffix": ""
},
{
"first": "Daniel",
"middle": [
"S"
],
"last": "Weld",
"suffix": ""
}
],
"year": 2012,
"venue": "AAAI",
"volume": "12",
"issue": "",
"pages": "94--100",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Xiao Ling and Daniel S Weld. 2012. Fine-grained en- tity recognition. In AAAI, volume 12, pages 94-100.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Fine-grained entity typing in hyperbolic space",
"authors": [
{
"first": "Federico",
"middle": [],
"last": "L\u00f3pez",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Heinzerling",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Strube",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 4th Workshop on Representation Learning for NLP",
"volume": "",
"issue": "",
"pages": "169--180",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Federico L\u00f3pez, Benjamin Heinzerling, and Michael Strube. 2019. Fine-grained entity typing in hyper- bolic space. In Proceedings of the 4th Workshop on Representation Learning for NLP (RepL4NLP- 2019), pages 169-180, Florence, Italy. Association for Computational Linguistics.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Deep contextualized word representations",
"authors": [
{
"first": "Matthew",
"middle": [
"E"
],
"last": "Peters",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Neumann",
"suffix": ""
},
{
"first": "Mohit",
"middle": [],
"last": "Iyyer",
"suffix": ""
},
{
"first": "Matt",
"middle": [],
"last": "Gardner",
"suffix": ""
},
{
"first": "Christopher",
"middle": [],
"last": "Clark",
"suffix": ""
},
{
"first": "Kenton",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "Luke",
"middle": [],
"last": "Zettlemoyer",
"suffix": ""
}
],
"year": 2018,
"venue": "Proc. of NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In Proc. of NAACL.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Fine-grained named entity annotations for german biographic interviews",
"authors": [
{
"first": "Josef",
"middle": [],
"last": "Ruppenhofer",
"suffix": ""
},
{
"first": "Ines",
"middle": [],
"last": "Rehbein",
"suffix": ""
},
{
"first": "Carolina",
"middle": [],
"last": "Flinz",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Josef Ruppenhofer, Ines Rehbein, and Carolina Flinz. 2020. Fine-grained named entity annotations for german biographic interviews.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Wikimatrix: Mining 135m parallel sentences in 1620",
"authors": [
{
"first": "Holger",
"middle": [],
"last": "Schwenk",
"suffix": ""
},
{
"first": "Vishrav",
"middle": [],
"last": "Chaudhary",
"suffix": ""
},
{
"first": "Shuo",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "Hongyu",
"middle": [],
"last": "Gong",
"suffix": ""
},
{
"first": "Francisco",
"middle": [],
"last": "Guzm\u00e1n",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong, and Francisco Guzm\u00e1n. 2019. Wiki- matrix: Mining 135m parallel sentences in 1620",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Table 2: We report development set and test set performance of the fine-grained entity typing model trained with different amounts of general entity training data. Best development set performance aligns with best test set performance on Level 1 metrics, and is only off by 1% for Level 2 metrics. language pairs from wikipedia",
"authors": [],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1907.05791"
]
},
"num": null,
"urls": [],
"raw_text": "Table 2: We report development set and test set perfor- mance of the fine-grained entity typing model trained with different amounts of general entity training data. Best development set performance aligns with best test set performance on Level 1 metrics, and is only off by 1% for Level 2 metrics. language pairs from wikipedia. arXiv preprint arXiv:1907.05791.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "2021, under submission. Fine-grained named entity typing beyond english using annotation projection",
"authors": [
{
"first": "Sabine",
"middle": [],
"last": "Weber",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Steedman",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sabine Weber and Mark Steedman. 2021, under sub- mission. Fine-grained named entity typing beyond english using annotation projection.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Improving semantic parsing via answer type inference",
"authors": [
{
"first": "Semih",
"middle": [],
"last": "Yavuz",
"suffix": ""
},
{
"first": "Izzeddin",
"middle": [],
"last": "Gur",
"suffix": ""
},
{
"first": "Yu",
"middle": [],
"last": "Su",
"suffix": ""
},
{
"first": "Mudhakar",
"middle": [],
"last": "Srivatsa",
"suffix": ""
},
{
"first": "Xifeng",
"middle": [],
"last": "Yan",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "149--159",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Semih Yavuz, Izzeddin Gur, Yu Su, Mudhakar Srivatsa, and Xifeng Yan. 2016. Improving semantic parsing via answer type inference. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 149-159.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "Fine-grained entity typing with the FIGER ontology in English. Correct types are highlighted."
},
"FIGREF1": {
"num": null,
"type_str": "figure",
"uris": null,
"text": "An example of FIGER type assignment using GermaNet. The manual mapping between GermaNet and FIGER is indicated by double lines. Whenever a word in the hypernym path of the input word is mapped to a FIGER type, the respective type gets assigned."
}
}
}
}