|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:27:03.583196Z" |
|
}, |
|
"title": "LibKGE A knowledge graph embedding library for reproducible research", |
|
"authors": [ |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Broscheit", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Mannheim", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ruffinelli", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Mannheim", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Kochsiek", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Mannheim", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Betz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Mannheim", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rainer", |
|
"middle": [], |
|
"last": "Gemulla", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Mannheim", |
|
"location": { |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "LIBKGE 1 is an open-source PyTorch-based library for training, hyperparameter optimization, and evaluation of knowledge graph embedding models for link prediction. The key goals of LIBKGE are to enable reproducible research, to provide a framework for comprehensive experimental studies, and to facilitate analyzing the contributions of individual components of training methods, model architectures, and evaluation methods. LIBKGE is highly configurable and every experiment can be fully reproduced with a single configuration file. Individual components are decoupled to the extent possible so that they can be mixed and matched with each other. Implementations in LIBKGE aim to be as efficient as possible without leaving the scope of Python/Numpy/PyTorch. A comprehensive logging mechanism and tooling facilitates indepth analysis. LIBKGE provides implementations of common knowledge graph embedding models and training methods, and new ones can be easily added. A comparative study (Ruffinelli et al., 2020) showed that LIBKGE reaches competitive to state-of-the-art performance for many models with a modest amount of automatic hyperparameter tuning.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "LIBKGE 1 is an open-source PyTorch-based library for training, hyperparameter optimization, and evaluation of knowledge graph embedding models for link prediction. The key goals of LIBKGE are to enable reproducible research, to provide a framework for comprehensive experimental studies, and to facilitate analyzing the contributions of individual components of training methods, model architectures, and evaluation methods. LIBKGE is highly configurable and every experiment can be fully reproduced with a single configuration file. Individual components are decoupled to the extent possible so that they can be mixed and matched with each other. Implementations in LIBKGE aim to be as efficient as possible without leaving the scope of Python/Numpy/PyTorch. A comprehensive logging mechanism and tooling facilitates indepth analysis. LIBKGE provides implementations of common knowledge graph embedding models and training methods, and new ones can be easily added. A comparative study (Ruffinelli et al., 2020) showed that LIBKGE reaches competitive to state-of-the-art performance for many models with a modest amount of automatic hyperparameter tuning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Knowledge graphs (KG) (Hayes-Roth, 1983 ) encode real-world facts as structured data. A knowledge graph can be represented as a set of (subject, relation, object)-triples, where the subject and object entities correspond to vertices, and relations to labeled edges in a graph.", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 39, |
|
"text": "(Hayes-Roth, 1983", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
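
{

"text": "As a small illustration of this representation (a generic sketch, not LIBKGE code), a KG can be stored as a set of (subject, relation, object) triples after mapping entity and relation names to integer ids:\n\n# a minimal sketch: a KG as integer-id triples\ntriples = [('barack_obama', 'born_in', 'honolulu'), ('honolulu', 'located_in', 'usa')]\nentities = sorted({t[0] for t in triples} | {t[2] for t in triples})\nrelations = sorted({t[1] for t in triples})\nent_id = {e: i for i, e in enumerate(entities)}\nrel_id = {r: i for i, r in enumerate(relations)}\nkg = [(ent_id[s], rel_id[r], ent_id[o]) for s, r, o in triples]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},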
|
{ |
|
"text": "KG embedding (KGE) models represent the KG's entities and relations as dense vectors, termed embeddings. KGE models compute a score based on these embeddings and are trained with the objective of predicting high scores for true triples and 1 https://github.com/uma-pi1/kge low scores for false triples. Link prediction is the task of predicting edges missing in the KG (Nickel et al., 2015) . Some uses of KGE models are: enhancing the knowledge representation in language models (Peters et al., 2019) , drug discovery in biomedical KGs (Mohamed et al., 2019) , as part of recommender systems (Wang et al., 2017) , or for visual relationship detection (Baier et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 369, |
|
"end": 390, |
|
"text": "(Nickel et al., 2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 501, |
|
"text": "(Peters et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 559, |
|
"text": "KGs (Mohamed et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 593, |
|
"end": 612, |
|
"text": "(Wang et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 652, |
|
"end": 672, |
|
"text": "(Baier et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
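
{

"text": "To make the notion of a scoring function concrete, the following is a minimal sketch of a DistMult-style scorer in PyTorch. It illustrates the general idea only and is not LIBKGE's actual API; ComplEx and other models use different score functions over the same kind of interface:\n\nimport torch\n\nnum_entities, num_relations, dim = 100, 10, 32\nE = torch.nn.Embedding(num_entities, dim)  # entity embeddings\nR = torch.nn.Embedding(num_relations, dim)  # relation embeddings\n\ndef score(s, r, o):\n    # higher scores should indicate more plausible (s, r, o) triples\n    return (E(s) * R(r) * E(o)).sum(dim=-1)\n\ns = torch.tensor([0]); r = torch.tensor([1]); o = torch.tensor([2])\nprint(score(s, r, o))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},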
|
{ |
|
"text": "KGE models for link prediction have seen a heightened interest in recent years. Many components of the KGE pipeline-i.e., KGE models, training methods, evaluation techniques, and hyperparameter optimization-have been studied in the literature, as well as the whole pipeline itself (Nickel et al., 2016; Wang et al., 2017; Ali et al., 2020) . Ruffinelli et al. (2020) argued that it is difficult to reach a conclusion about the impact of each component based on the original publications. For example, multiple components may have been changed simultaneously without performing an ablation study, baselines may not have been trained with state-of-the-art methods, or the hyperparameter space may not have been sufficiently explored.", |
|
"cite_spans": [ |
|
{ |
|
"start": 281, |
|
"end": 302, |
|
"text": "(Nickel et al., 2016;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 321, |
|
"text": "Wang et al., 2017;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 339, |
|
"text": "Ali et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 366, |
|
"text": "Ruffinelli et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "LIBKGE is an open-source KGE library for reproducible research. It aims to facilitate meaningful experimental comparisons of all components of the KGE pipeline. To this end, LIBKGE is faithful to the following principles:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Modularization and extensibility. LIBKGE is cleanly modularized. Individual components can be mixed and matched with each other, and new components can be easily added.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "and reproducibility. In LIBKGE an experiment is entirely defined by a single configuration file with well-documented configuration options for every component. When an experiment is started, its current configuration is stored alongside the model to enable reproducibility and analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Configurability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Profiling and analysis. LIBKGE performs extensive logging during experiments and monitors performance metrics such as runtime, memory usage, training loss, and evaluation metrics. Additionally, specific monitoring of any part of the KGE pipeline can be added via a hook system. The logging is done in both human-readable form and in a machine-readable format.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Configurability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Ease of use. LIBKGE is designed to support the workflow of researchers by convenient tooling and easy usage with single line commands. Each training job or hyperparameter search job can be interrupted and resumed at any time. For tuning of hyperparameters, LIBKGE supports grid search, quasi-random search and Bayesian Optimization. All implementations stay in the realm of Python/Py-Torch/Numpy and aim to be as efficient as possible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Configurability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "LIBKGE supports the needs of researchers who want to investigate new components or improvements of the KGE pipeline. The strengths of LIBKGE enabled a comprehensive study that provided new insights about training KGE models (Ruffinelli et al., 2020) . For an overview about usage, pretrained models, and detailed documentation, please refer to LIBKGE's project page. In this paper, we discuss the key principles of LIBKGE.", |
|
"cite_spans": [ |
|
{ |
|
"start": 224, |
|
"end": 249, |
|
"text": "(Ruffinelli et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Configurability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "LIBKGE is highly modularized, which allows to mix and match training methods, models, and evaluation methods (see Figure 1 ). The modularization allows for simple and clean ways to extend the framework with new features that will be available for every model. For example, LIBKGE decouples the Relation-alScorer (the KGE scoring function) and KgeEmbedder (the way embeddings are obtained) as depicted in Figure 1 . In other frameworks, the embedder function is hardcoded to the equivalent of LIBKGE's LookupEmbedder, in which embeddings are explicitly stored for each entity. Due to LIBKGE's decoupling, the embedder type can be freely specified independently of the scoring function, which enables users to train a KGE model with other types of embedders. For example, the embedding function could be an encoder that computes an entity or relation embedding from textual descriptions or pixels of an image (Pezeshkpour et al., 2018; Broscheit et al., 2020, inter alia ", |
|
"cite_spans": [ |
|
{ |
|
"start": 907, |
|
"end": 933, |
|
"text": "(Pezeshkpour et al., 2018;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 934, |
|
"end": 968, |
|
"text": "Broscheit et al., 2020, inter alia", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 122, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 404, |
|
"end": 412, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Modularization and extensibility", |
|
"sec_num": "2" |
|
}, |
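
{

"text": "The following sketch illustrates this decoupling. The class names mirror Figure 1, but the signatures are simplified assumptions for illustration, not LIBKGE's actual API:\n\nimport torch\n\nclass KgeEmbedder(torch.nn.Module):\n    def embed(self, indexes):  # map entity/relation ids to embedding vectors\n        raise NotImplementedError\n\nclass LookupEmbedder(KgeEmbedder):\n    def __init__(self, n, dim):\n        super().__init__()\n        self.weights = torch.nn.Embedding(n, dim)  # one stored vector per id\n    def embed(self, indexes):\n        return self.weights(indexes)\n\nclass RelationalScorer(torch.nn.Module):\n    def score(self, s_emb, r_emb, o_emb):  # any scoring function, e.g., ComplEx\n        raise NotImplementedError\n\n# a KGE model then combines one scorer with freely chosen entity/relation embedders",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Modularization and extensibility",

"sec_num": "2"

},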
|
{ |
|
"text": "Reproducibility is important, which means that configuration is important. To enable reproducibility, it is key that the entire configuration of each experiment be persistently stored and accessible. While this sounds almost obvious, the crux is how this can be achieved. Typically, source code can and will change. Therefore, to make an experiment in a certain setting reproducible, the configuration for an experiment has to be decoupled from the code as much as possible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Configurability and reproducibility", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In LIBKGE all settings are always retrieved from a configuration object that is initialized from configuration files and is used by all components of the pipeline. This leads to comprehensive configuration files that fully document an experiment and make it reproducible as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Configurability and reproducibility", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To make this comprehensive configurability feasible-while also remaining modular-LIBKGE includes a lightweight import functionality for configuration files. In Figure 2 , we show an (almost) minimal configuration for an experiment for training a ComplEx KGE model (Trouillon et al., 2016) . The main configuration file my experiment.yaml in Figure 2 will automatically import the model-specific configuration complex.yaml, which in turn imports the configuration lookup embedder.yaml. The latter defines the default configurations of the LookupEmbedder for entities and relations, which associates every entity and relation identifier with its respective embedding. All configurations are merged into a single configuration object. During merging, the settings in the main configuration file always have precedence over the settings from imported files. The resulting single configuration will be automatically saved in the experiment directory along with the checkpoints and the log files.", |
|
"cite_spans": [ |
|
{ |
|
"start": 264, |
|
"end": 288, |
|
"text": "(Trouillon et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 168, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 349, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Configurability and reproducibility", |
|
"sec_num": "3" |
|
}, |
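
{

"text": "The import-and-merge semantics can be pictured as a recursive dictionary merge in which the importing file wins. The following is a generic sketch with illustrative keys, not LIBKGE's implementation:\n\ndef merge(base, override):\n    # settings from 'override' (the importing file) take precedence over 'base'\n    out = dict(base)\n    for key, value in override.items():\n        if isinstance(value, dict) and isinstance(out.get(key), dict):\n            out[key] = merge(out[key], value)\n        else:\n            out[key] = value\n    return out\n\ndefaults = {'lookup_embedder': {'dim': 100, 'regularize': 'lp'}}\nexperiment = {'lookup_embedder': {'dim': 256}}\nconfig = merge(defaults, experiment)  # dim becomes 256, regularize stays 'lp'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Configurability and reproducibility",

"sec_num": "3"

},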
|
{ |
|
"text": "As an example of how configurability also helps modularization, we come back to the example of switching the LookupEmbedder with an encoder that computes entity embeddings from string tokens. For this purpose, one may implement a TokenPoolEmbedder. The simple changes to the configuration that uses the new embedder type are demonstrated in Figure 3 (see line 12).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 349, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Configurability and reproducibility", |
|
"sec_num": "3" |
|
}, |
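
{

"text": "Such a TokenPoolEmbedder might, for instance, mean-pool token embeddings over an entity's textual description. The following is a hypothetical sketch (TokenPoolEmbedder is an illustrative example from the text, not a shipped LIBKGE component):\n\nimport torch\n\nclass TokenPoolEmbedder(torch.nn.Module):\n    def __init__(self, vocab_size, dim, entity_token_ids):\n        super().__init__()\n        self.token_emb = torch.nn.Embedding(vocab_size, dim)\n        # entity_token_ids: LongTensor [num_entities, max_tokens], 0 = padding\n        self.entity_token_ids = entity_token_ids\n    def embed(self, indexes):\n        tokens = self.entity_token_ids[indexes]  # [batch, max_tokens]\n        emb = self.token_emb(tokens)             # [batch, max_tokens, dim]\n        mask = (tokens != 0).unsqueeze(-1).float()\n        return (emb * mask).sum(1) / mask.sum(1).clamp(min=1)  # mean pooling",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Configurability and reproducibility",

"sec_num": "3"

},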
|
{

"text": "It is worth noting that while the default settings in LIBKGE's main configuration file reflect the currently known best practices, LIBKGE also includes, and makes configurable, some settings that might not be considered best practice, e.g., different tie-breaking schemes for ranking evaluations (Sun et al., 2020). Therefore, with regard to configurability, the goal is not only that the framework reflects best practices, but also popular practices that might influence ongoing research.",

"cite_spans": [

{

"start": 297,

"end": 315,

"text": "(Sun et al., 2020)",

"ref_id": "BIBREF18"

}

],

"ref_spans": [],

"eq_spans": [],

"section": "Configurability and reproducibility",

"sec_num": "3"

},

{

"text": "Figure 4: An example of a hyperparameter optimization job. This configuration first runs 10 trials of quasi-random search followed by 10 trials of Bayesian Optimization (see ax_search.num_trials and ax_search.num_sobol_trials). By setting the keys search.device_pool and search.num_workers in lines 3 and 4, the execution of the trials is parallelized to run 4 parallel trials distributed over two GPU devices.",

"cite_spans": [],

"ref_spans": [

{

"start": 0,

"end": 8,

"text": "Figure 4",

"ref_id": null

}

],

"eq_spans": [],

"section": "Configurability and reproducibility",

"sec_num": "3"

},
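
{

"text": "To illustrate why tie breaking matters, the following sketch computes the rank of the true entity under three common schemes (a generic illustration, not LIBKGE's implementation; the optimistic variant corresponds to the problematic TOP scheme discussed above):\n\nimport torch\n\ndef rank(scores, true_idx, scheme='mean'):\n    true_score = scores[true_idx]\n    better = (scores > true_score).sum().item()\n    ties = (scores == true_score).sum().item() - 1  # exclude the true entity\n    if scheme == 'optimistic':  # true entity placed first among ties\n        return better + 1\n    if scheme == 'pessimistic':  # true entity placed last among ties\n        return better + ties + 1\n    if scheme == 'mean':\n        return better + ties / 2.0 + 1\n    raise ValueError(scheme)\n\n# a degenerate model with constant scores gets rank 1 everywhere under\n# 'optimistic' tie breaking, which inflates MRR; 'mean' avoids this",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Configurability and reproducibility",

"sec_num": "3"

},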
|
{ |
|
"text": "Hyperparameter optimization is crucial in empirically investigating the impact of individual components of the KGE pipeline. LIBKGE offers manual search, grid search, random search, and Bayesian Optimization; the latter two provided by the hyperparameter optimization framework Ax. 2 In this context, LIBKGE further benefits from its configurability because everything can be treated 2 https://ax.dev/ as a hyperparameter, even the choice of model, score function, or embedder. The example in Figure 4 shows a simple hyperparameter search with an initial quasi-random search, and a subsequent Bayesian Optimization phase over the learning rate, batch size and negative samples for the ComplEx model. The trials during the quasi-random search are independent, which can be exploited by parallelizing their runs over multiple devices. In this way, a comprehensive search over a large space of hyperparameters can be sped up significantly (also shown in the example; for more details, please refer to the documentation).", |
|
"cite_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 283, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 493, |
|
"end": 501, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hyperparameter optimization", |
|
"sec_num": "4" |
|
}, |
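
{

"text": "The two-phase search can be pictured as follows (a generic sketch; LIBKGE delegates the actual search to Ax, whose API is not reproduced here, and train_and_validate is a stand-in for a full training run):\n\nimport random\n\ndef train_and_validate(cfg):\n    return random.random()  # stand-in: train a model, return validation MRR\n\ndef sample_config():  # quasi-random phase; Ax uses Sobol sequences instead\n    return {'lr': 10 ** random.uniform(-4, 0),\n            'batch_size': random.choice([128, 256, 512]),\n            'num_negs': random.randint(1, 1000)}\n\nresults = [(cfg, train_and_validate(cfg)) for cfg in (sample_config() for _ in range(10))]\n# phase 2: a Bayesian-optimization model would propose the next 10 trials based\n# on 'results'; the independent phase-1 trials can run in parallel on several GPUs",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hyperparameter optimization",

"sec_num": "4"

},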
|
{ |
|
"text": "LIBKGE provides extensive options for profiling, debugging, and analyzing the KGE pipeline. While most frameworks print the current training loss and some frameworks also record the validation metrics, LIBKGE aims to make every moving part of the pipeline observable. Per default, LIBKGE records during training things such as runtimes, training loss and penalties (e.g., from the entity and relation embedders), relevant meta data such as the PyTorch version and the current commit hash, and dependencies between various jobs. We show an example logging output during training one epoch in Appendix B. For more fine-grained logging, LIBKGE also can log at the batch level.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Profiling and metadata analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "During evaluation, the framework records many variations of the evaluation metrics, such as grouping relations by relation type, relation frequency, head or tail. Additionally, users can extract and add information by adding their custom function to one of multiple hooks that are executed before and after all relevant calls in the framework. In this way, users can interact with all components of the pipeline, without risking divergence from LIBKGE's master branch.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Profiling and metadata analysis", |
|
"sec_num": "5" |
|
}, |
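
{

"text": "Conceptually, such a hook system is a list of user-supplied callables that the framework invokes at fixed points. A minimal generic sketch (not LIBKGE's actual hook names):\n\nclass TrainingJob:\n    def __init__(self):\n        self.post_batch_hooks = []  # user functions run after every batch\n    def run_batch(self, batch):\n        result = {'batch': batch, 'loss': 0.0}  # stand-in for the real work\n        for hook in self.post_batch_hooks:\n            hook(self, result)  # hooks can inspect or record anything here\n        return result\n\njob = TrainingJob()\njob.post_batch_hooks.append(lambda job, result: print(result['loss']))\njob.run_batch([0, 1, 2])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Profiling and metadata analysis",

"sec_num": "5"

},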
|
{ |
|
"text": "Finally, LIBKGE provides convenience methods to export (subsets of) the logged meta data into plain CSV files.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Profiling and metadata analysis", |
|
"sec_num": "5" |
|
}, |
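
{

"text": "Because each trace entry is a self-contained record (cf. Appendix B), such an export essentially filters and flattens the log. A minimal sketch, under the assumption that every line of the trace file parses as a JSON object (file names are hypothetical):\n\nimport csv, json\n\nwith open('trace.yaml') as f:\n    records = [json.loads(line) for line in f if line.strip()]\nepochs = [r for r in records if r.get('event') == 'epoch_completed']\nwith open('trace.csv', 'w', newline='') as f:\n    writer = csv.DictWriter(f, fieldnames=['epoch', 'epoch_time', 'avg_penalty'],\n                            extrasaction='ignore')\n    writer.writeheader()\n    writer.writerows(epochs)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Profiling and metadata analysis",

"sec_num": "5"

},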
|
{ |
|
"text": "In this section, we compare LIBKGE to other open source software (OSS) that provides functionality around training and evaluating KGE models for link prediction. The assessments are a snaphot taken at the end of May 2020. All model-specific comparisons have been evaluated w.r.t. the Com-plEx model, which is supported by all projects. Logging denotes the number of metadata keys that are logged per epoch for training and evaluation in a machine readable format for later analysis. Hyperparameter optimization shows if the project supports grid search, random search and Bayesian Optimization. Resume denotes the feature to resume hyperparameter search or training from checkpoints at any time. Active is the amount of commits to the master branch in the last 12 months.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to other KGE Projects", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In Table 1 , we provide an overview of other KGE projects (full references in Appendix C) and compare them w.r.t. configurability and ease of use. We mainly included projects that could be considered as a basis for a researcher's experiments because they are active, functional, and cover at least a few of the most common models. All projects can be extended with models, losses, or training methods. Large-scale projects and paper code projects-in comparison to more holistic frameworks-typically have a more narrow scope, e.g., they often do not feature hyperparameter optimization. Large-scale projects are typically tailored towards parallelizing training methods and models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison to other KGE Projects", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The focus on configurability and reproducibility in LIBKGE is reflected by the large amount of configurable keys. For example, in contrast to other projects, LIBKGE does not tie the regularization weights of the entity and relation embedder to be the same. For entity ranking evaluation, only LIBKGE and PyKeen transparently implement different tie breaking schemes for equally ranked entities. This is important, because evaluation under different tie breaking schemes can result in differences of \u2248 .40 MRR in some models and can lead to misleading conclusions, as shown by Sun et al. (2020) . OpenKE, for example, only supports the problematic tie breaking scheme named TOP by Sun et al. (2020) . LIBKGE and PyKeen are the only frameworks that provide machine-readable logging. Only LIBKGE offers resuming from a checkpoint for training and hyperparameter search. LIBKGE, Ampligraph, and PyKeen are the most active projects in terms of amount of commits during the past 12 months. Table 2 , we show a comparison of KGE frameworks in terms of time for one full training epoch. The configuration setting was chosen such that it was supported by all frameworks, and also facilitates to demonstrate behaviour under varying load. We translate the configurations faithfully to each framework, ensuring that total number of embedding parameters per batch are the same for each framework. Most projects, including LIBKGE, can handle small numbers of negative samples efficiently, but LIBKGE seems to scale Table 2 : Runtime comparison between frameworks. The runtime is the time per epoch in seconds (averaged over 5 epochs executed on the same machine). The configuration is fixed to be similar for all frameworks (details in Appendix A). For negative samples, we show runtimes for random, i.e., sampling triples without checking if they are contained in the KG, and for pseudo-negative, which avoids sampling triples contained in the KG. The column parallel batch construction indicates whether the code in the training loop for generating the batches is parallelized; if yes, then we set the number of workers to 4. OOM stands for out-of-memory. OOT is short for out-of-time; we stopped the run when the first epoch did not finish within 30 minutes. (*) Graphvite is optimized for multi-gpu training with large batch sizes, therefore the chosen settings might not be optimal. better to higher numbers of negative samples. A large number of negative samples becomes important when large KGs with millions of entities are embedded. Although the runtimes are purely anecdotal and should be taken with a grain of salt, they do show that LIBKGE can provide competitive runtime performance. Currently, LIBKGE only supports single-node single-gpu training. It nevertheless fares well when compared to GraphVite, one of the large-scale frameworks that dispatches some routines into C/C++. LIBKGE also has optimized versions of negative sampling for large graphs, which enables it to train ComplEx on Wikidata-5m (Wang et al., 2019) , a large KG with 5M entities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 576, |
|
"end": 593, |
|
"text": "Sun et al. (2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 680, |
|
"end": 697, |
|
"text": "Sun et al. (2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 3002, |
|
"end": 3021, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 984, |
|
"end": 991, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1501, |
|
"end": 1508, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison to other KGE Projects", |
|
"sec_num": "6" |
|
}, |
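
{

"text": "The difference between the two negative-sampling variants from Table 2 can be sketched as follows (illustrative only; here the object position of a positive triple is corrupted):\n\nimport random\n\ndef negative_random(s, r, num_entities):\n    # fast, but may occasionally produce a triple that is actually in the KG\n    return (s, r, random.randrange(num_entities))\n\ndef negative_pseudo(s, r, num_entities, kg):  # kg: set of (s, r, o) id triples\n    while True:\n        o = random.randrange(num_entities)\n        if (s, r, o) not in kg:  # resample until the triple is not in the KG\n            return (s, r, o)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Comparison to other KGE Projects",

"sec_num": "6"

},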
|
{ |
|
"text": "Predictive performance. In Table 3 , we collected the reported performances for ComplEx on the dataset FB15K-237 (Toutanova and Chen, 2015) . The numbers are not comparable due to different amount of effort to find a good configuration 3 , but they reflect the performance that the framework authors achieved in their experiments. The results show that with LIBKGE's architecture and hyperparameter optimization a state-of-the-art result can be achieved. For more results obtained with LIBKGE and an in-depth analysis of the impact of hyperparameters on model performance we refer to the study by Ruffinelli et al. (2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 139, |
|
"text": "(Toutanova and Chen, 2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 597, |
|
"end": 621, |
|
"text": "Ruffinelli et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 34, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Efficiency In", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this work, we presented LIBKGE, a configurable, modular, and efficient framework for reproducible research on knowledge graph embedding models. We briefly described the internal structure of the framework and how it facilitates LIBKGE's goals. The framework is efficient and yields stateof-the-art performance. We hope that LIBKGE is a helpful ingredient to gain new insights into knowledge graph embeddings, and that a lively community gathers around this project to improve and extend it further. B Logging 0 { 1 \"entry_id\":84d75bf2-c3fe-4c6f-ac5e-001e1edb85de, 2 \"event\":\"job_created\", 3 \"folder\":/home/USER/kge/local/experiments/20200705-215353-toy-complex-train, 4 \"git_head\":7fad132,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{

"text": "{\n \"entry_id\":84d75bf2-c3fe-4c6f-ac5e-001e1edb85de,\n \"event\":\"job_created\",\n \"folder\":/home/USER/kge/local/experiments/20200705-215353-toy-complex-train,\n \"git_head\":7fad132,\n \"hostname\":USER-Workstation,\n \"job\":\"eval\",\n \"job_id\":683d00bf-520d-4919-937e-d9b634c11d2e,\n \"parent_job_id\":dc960211-9cbe-4ba1-ad62-7ffd41d2017e,\n \"timestamp\":1593978837.304522,\n \"torch_version\":1.5.0,\n \"username\":\"USER\"\n}{\n \"entry_id\":418889f0-728b-486f-9977-48795f6ed5fa,\n \"event\":\"job_created\",\n \"folder\":/home/USER/kge/local/experiments/20200705-215353-toy-complex-train,\n \"git_head\":7fad132,\n \"hostname\":USER-Workstation,\n \"job\":\"train\",\n \"job_id\":dc960211-9cbe-4ba1-ad62-7ffd41d2017e,\n \"timestamp\":1593978837.4033182,\n \"torch_version\":1.5.0,\n [...]\n \"avg_penalty\":0.0003658807344436354,\n \"backward_time\":0.0765678882598877,\n \"batches\":20,\n \"entry_id\":4b07adfa-3e2b-42f4-a994-2b4f02e1b3f4,\n \"epoch\":1,\n \"epoch_time\":1.161754846572876,\n \"event\":\"epoch_completed\",\n \"forward_time\":0.7509596347808838,\n \"job\":\"train\",\n \"job_id\":dc960211-9cbe-4ba1-ad62-7ffd41d2017e,\n \"lr\":[ 0.2 ],\n \"optimizer_time\":0.015013933181762695,\n \"other_time\":0.2690012454986572,\n \"prepare_time\":0.05021214485168457,\n \"scope\":\"epoch\",\n \"size\":1949,\n \"split\":\"train\",\n \"timestamp\":1593978838.5940151,\n \"type\":\"KvsAll\"\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "B Logging",

"sec_num": null

},

{

"text": "Figure 5: Example of training logging output for one epoch. Evaluation logging output is too verbose to add an example here. Please see https://github.com/uma-pi1/kge/blob/master/docs/examples/train_and_valid_trace_after_one_epoch.yaml for an example of the output after one epoch of training and evaluation.",

"cite_spans": [],

"ref_spans": [

{

"start": 0,

"end": 8,

"text": "Figure 5",

"ref_id": null

}

],

"eq_spans": [],

"section": "B Logging",

"sec_num": null

},
|
{ |
|
"text": "We did not attempt to use our best configuration with other frameworks because they only partly support the settings, e.g., they do not offer dropout or independent regularization for entity and relation embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The configuration was as follows: Dataset FB15K (Bordes et al., 2013) , batch size 512, model Com-plEx, effective parameter embedding size per entity/relation 128, optimizer Adagrad, negative sampling with negative log likelihood loss or sigmoid loss, no regularization. The hardware was a 8-core Intel Xeon E5-1630m v4.0, TitanXP GPU, dataset on SSD.", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 69, |
|
"text": "(Bordes et al., 2013)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Runtime comparison experiment", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Bringing light into the dark: A large-scale evaluation of knowledge graph embedding models under a unified framework", |
|
"authors": [ |
|
{ |
|
"first": "Mehdi", |
|
"middle": [], |
|
"last": "Ali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Berrendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [ |
|
"Tapley" |
|
], |
|
"last": "Hoyt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Vermue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikhail", |
|
"middle": [], |
|
"last": "Galkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sahand", |
|
"middle": [], |
|
"last": "Sharifzadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asja", |
|
"middle": [], |
|
"last": "Fischer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volker", |
|
"middle": [], |
|
"last": "Tresp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2006.13365" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mehdi Ali, Max Berrendorf, Charles Tapley Hoyt, Lau- rent Vermue, Mikhail Galkin, Sahand Sharifzadeh, Asja Fischer, Volker Tresp, and Jens Lehmann. 2020. Bringing light into the dark: A large-scale evaluation of knowledge graph embedding models under a uni- fied framework. arXiv preprint arXiv:2006.13365.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Improving visual relationship detection using semantic modeling of scene descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Baier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunpu", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volker", |
|
"middle": [], |
|
"last": "Tresp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 16th International Semantic Web Conference (ISWC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephan Baier, Yunpu Ma, and Volker Tresp. 2017. Im- proving visual relationship detection using seman- tic modeling of scene descriptions. In Proceedings of the 16th International Semantic Web Conference (ISWC).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Translating embeddings for modeling multirelational data", |
|
"authors": [ |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Usunier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Garc\u00eda-Dur\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oksana", |
|
"middle": [], |
|
"last": "Yakhnenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in Neural Information Processing Systems 26: 27th Annual Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2787--2795", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antoine Bordes, Nicolas Usunier, Alberto Garc\u00eda- Dur\u00e1n, Jason Weston, and Oksana Yakhnenko. 2013. Translating embeddings for modeling multi- relational data. In Advances in Neural Information Processing Systems 26: 27th Annual Conference on Neural Information Processing Systems 2013. Pro- ceedings of a meeting held December 5-8, 2013, Lake Tahoe, Nevada, United States, pages 2787- 2795.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Can we predict new facts with open knowledge graph embeddings? A benchmark for open link prediction", |
|
"authors": [ |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Broscheit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiril", |
|
"middle": [], |
|
"last": "Gashteovski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanjie", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rainer", |
|
"middle": [], |
|
"last": "Gemulla", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2296--2308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel Broscheit, Kiril Gashteovski, Yanjie Wang, and Rainer Gemulla. 2020. Can we predict new facts with open knowledge graph embeddings? A bench- mark for open link prediction. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 2296-2308, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Lowdimensional hyperbolic knowledge graph embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Ines", |
|
"middle": [], |
|
"last": "Chami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adva", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Da-Cheng", |
|
"middle": [], |
|
"last": "Juan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frederic", |
|
"middle": [], |
|
"last": "Sala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujith", |
|
"middle": [], |
|
"last": "Ravi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "R\u00e9", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ines Chami, Adva Wolf, Da-Cheng Juan, Frederic Sala, Sujith Ravi, and Christopher R\u00e9. 2020. Low- dimensional hyperbolic knowledge graph embed- dings. Annual Meeting of the Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "AmpliGraph: a library for representation learning on knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Costabello", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Pai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chan", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Van", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rory", |
|
"middle": [], |
|
"last": "Mc-Grath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Mccarthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro", |
|
"middle": [], |
|
"last": "Tabacof", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luca Costabello, Sumit Pai, Chan Le Van, Rory Mc- Grath, Nicholas McCarthy, and Pedro Tabacof. 2019. AmpliGraph: a library for representation learning on knowledge graphs.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Convolutional 2d knowledge graph embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Dettmers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pasquale", |
|
"middle": [], |
|
"last": "Minervini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pontus", |
|
"middle": [], |
|
"last": "Stenetorp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1811--1818", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tim Dettmers, Pasquale Minervini, Pontus Stenetorp, and Sebastian Riedel. 2018. Convolutional 2d knowledge graph embeddings. In Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Ap- plications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, pages 1811- 1818.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "OpenKE: An open toolkit for knowledge embedding", |
|
"authors": [ |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shulin", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Lv", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yankai", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juanzi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xu Han, Shulin Cao, Xin Lv, Yankai Lin, Zhiyuan Liu, Maosong Sun, and Juanzi Li. 2018. OpenKE: An open toolkit for knowledge embedding. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Building expert systems, volume 1 of Advanced book program", |
|
"authors": [ |
|
{ |
|
"first": "Frederick", |
|
"middle": [], |
|
"last": "Hayes-Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1983, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frederick Hayes-Roth. 1983. Building expert systems, volume 1 of Advanced book program. Addison- Wesley.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Canonical tensor decomposition for knowledge base completion", |
|
"authors": [ |
|
{ |
|
"first": "Timoth\u00e9e", |
|
"middle": [], |
|
"last": "Lacroix", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Usunier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Obozinski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 35th International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2869--2878", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timoth\u00e9e Lacroix, Nicolas Usunier, and Guillaume Obozinski. 2018. Canonical tensor decomposition for knowledge base completion. In Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholmsm\u00e4ssan, Stock- holm, Sweden, July 10-15, 2018, pages 2869-2878.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "PyTorch-BigGraph: A Largescale Graph Embedding System", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lerer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ledell", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothee", |
|
"middle": [], |
|
"last": "Lacroix", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Wehrstedt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhijit", |
|
"middle": [], |
|
"last": "Bose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Peysakhovich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2nd SysML Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Lerer, Ledell Wu, Jiajun Shen, Timothee Lacroix, Luca Wehrstedt, Abhijit Bose, and Alex Peysakhovich. 2019. PyTorch-BigGraph: A Large- scale Graph Embedding System. In Proceedings of the 2nd SysML Conference, Palo Alto, CA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Drug target discovery using knowledge graph embeddings", |
|
"authors": [ |
|
{

"first": "Sameh",

"middle": [

"K"

],

"last": "Mohamed",

"suffix": ""

},

{

"first": "Aayah",

"middle": [],

"last": "Nounu",

"suffix": ""

},

{

"first": "V\u00edt",

"middle": [],

"last": "Nov\u00e1\u010dek",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 34th ACM/SI-GAPP Symposium on Applied Computing, SAC 19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3297280.3297282" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameh K. Mohamed, Aayah Nounu, and V\u00edt Nov\u00e1\u010dek. 2019. Drug target discovery using knowledge graph embeddings. In Proceedings of the 34th ACM/SI- GAPP Symposium on Applied Computing, SAC 19, page 1118, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A review of relational machine learning for knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Nickel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volker", |
|
"middle": [], |
|
"last": "Tresp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maximilian Nickel, Kevin Murphy, Volker Tresp, and Evgeniy Gabrilovich. 2015. A review of relational machine learning for knowledge graphs. Proceed- ings of the IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A review of relational machine learning for knowledge graphs. Proceedings of the IEEE", |
|
"authors": [ |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Nickel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Volker", |
|
"middle": [], |
|
"last": "Tresp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "104", |
|
"issue": "", |
|
"pages": "11--33", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maximilian Nickel, Kevin Murphy, Volker Tresp, and Evgeniy Gabrilovich. 2016. A review of relational machine learning for knowledge graphs. Proceed- ings of the IEEE, 104(1):11-33.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Knowledge enhanced contextual word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Logan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vidur", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--54", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1005" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E. Peters, Mark Neumann, Robert Logan, Roy Schwartz, Vidur Joshi, Sameer Singh, and Noah A. Smith. 2019. Knowledge enhanced contextual word representations. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 43-54, Hong Kong, China. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Embedding multimodal relational data for knowledge base completion", |
|
"authors": [ |
|
{ |
|
"first": "Pouya", |
|
"middle": [], |
|
"last": "Pezeshkpour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liyan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3208--3218", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1359" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pouya Pezeshkpour, Liyan Chen, and Sameer Singh. 2018. Embedding multimodal relational data for knowledge base completion. In Proceedings of the 2018 Conference on Empirical Methods in Natu- ral Language Processing, pages 3208-3218, Brus- sels, Belgium. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "You CAN teach an old dog new tricks! on training knowledge graph embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ruffinelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Broscheit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rainer", |
|
"middle": [], |
|
"last": "Gemulla", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "8th International Conference on Learning Representations", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Ruffinelli, Samuel Broscheit, and Rainer Gemulla. 2020. You CAN teach an old dog new tricks! on training knowledge graph embeddings. In 8th International Conference on Learning Represen- tations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "RotatE: Knowledge graph embedding by relational rotation in complex space", |
|
"authors": [ |
|
{ |
|
"first": "Zhiqing", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi-Hong", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian-Yun", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 7th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiqing Sun, Zhi-Hong Deng, Jian-Yun Nie, and Jian Tang. 2019. RotatE: Knowledge graph embedding by relational rotation in complex space. In Proceed- ings of the 7th International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A reevaluation of knowledge graph completion methods", |
|
"authors": [ |
|
{ |
|
"first": "Zhiqing", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shikhar", |
|
"middle": [], |
|
"last": "Vashishth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumya", |
|
"middle": [], |
|
"last": "Sanyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Partha", |
|
"middle": [], |
|
"last": "Talukdar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5516--5522", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiqing Sun, Shikhar Vashishth, Soumya Sanyal, Partha Talukdar, and Yiming Yang. 2020. A re- evaluation of knowledge graph completion methods. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5516-5522, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Observed versus latent features for knowledge base and text inference", |
|
"authors": [ |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 3rd Workshop on Continuous Vector Space Models and their Compositionality", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "57--66", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W15-4007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristina Toutanova and Danqi Chen. 2015. Observed versus latent features for knowledge base and text inference. In Proceedings of the 3rd Workshop on Continuous Vector Space Models and their Composi- tionality, pages 57-66, Beijing, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Complex embeddings for simple link prediction", |
|
"authors": [ |
|
{ |
|
"first": "Th\u00e9o", |
|
"middle": [], |
|
"last": "Trouillon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Welbl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c9ric", |
|
"middle": [], |
|
"last": "Gaussier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Bouchard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 33nd International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2071--2080", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Th\u00e9o Trouillon, Johannes Welbl, Sebastian Riedel,\u00c9ric Gaussier, and Guillaume Bouchard. 2016. Complex embeddings for simple link prediction. In Proceed- ings of the 33nd International Conference on Ma- chine Learning, ICML 2016, New York City, NY, USA, June 19-24, 2016, pages 2071-2080.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Knowledge graph embedding: A survey of approaches and applications", |
|
"authors": [ |
|
{ |
|
"first": "Quan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhendong", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "IEEE Transactions on Knowledge and Data Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Quan Wang, Zhendong Mao, Bin Wang, and Li Guo. 2017. Knowledge graph embedding: A survey of approaches and applications. IEEE Transactions on Knowledge and Data Engineering.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Kepler: A unified model for knowledge embedding and pretrained language representation", |
|
"authors": [ |
|
{ |
|
"first": "Xiaozhi", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyu", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhaocheng", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juanzi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaozhi Wang, Tianyu Gao, Zhaocheng Zhu, Zhiyuan Liu, Juanzi Li, and Jian Tang. 2019. Kepler: A unified model for knowledge embedding and pre- trained language representation.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Dgl-ke: Training knowledge graph embeddings at scale", |
|
"authors": [ |
|
{ |
|
"first": "Da", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chao", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeyuan", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zi-Hao", |
|
"middle": [], |
|
"last": "Ye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Karypis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Da Zheng, Xiang Song, Chao Ma, Zeyuan Tan, Zi- Hao Ye, J. Dong, Hao Xiong, Zheng Zhang, and G. Karypis. 2020. Dgl-ke: Training knowledge graph embeddings at scale. Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Graphvite: A high-performance cpu-gpu hybrid system for node embedding", |
|
"authors": [ |
|
{ |
|
"first": "Zhaocheng", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shizhen", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "The World Wide Web Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2494--2504", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhaocheng Zhu, Shizhen Xu, Meng Qu, and Jian Tang. 2019. Graphvite: A high-performance cpu-gpu hy- brid system for node embedding. In The World Wide Web Conference, pages 2494-2504. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Overview of related work", |
|
"authors": [], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hyperbolic KGE https://github.com/tensorflow/neural-structured-learning/tree/master/research/kg hyp emb Chami et al. (2020) ConvE https://github.com/TimDettmers/ConvE Dettmers et al. (2018) RotatE https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding Sun et al. (2019) Table 4: Overview of related work", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "A brief overview of LIBKGE's architecture.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"text": "A minimal configuration my experiment.yaml that defines 10 out of \u2248 100 configurable settings. All settings from the main configuration file my experiment.yaml and from the imported configurations are merged and stored in one combined file. No default settings are defined in the code.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"text": "o r t : [ t o k e n p o o l e m b e d d e r ] 1 j o b . t y p e : t r a i n 2 d a t a s e t . name: fb15k \u2212237 i m i z e r : Adagrad 6 o p t i m i z e r a r g s . l r : 0 i t y e m b e d d e r : 12 t y p e : t o k e n p o o l e m b e d d e r 13 r e l a t i o n e m b e d d e r : 14 t y p e : l o o k u p e m b e d d e r", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF5": { |
|
"num": null, |
|
"text": "Example of using a token-based embedder.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF6": { |
|
"num": null, |
|
"text": "r d e r e d : True 26 -name: t r a i n . o p t i m i z e r a r g s . l", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"text": ").", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>Search:Job</td><td>GridSearch</td></tr><tr><td>+device_pool: str[...*] +process_pool: TrainingJob +...</td><td>AxSearch Quasi random search, Bayesian</td></tr><tr><td>1</td><td>Optimization</td></tr><tr><td>*</td><td/></tr><tr><td>TrainingJob:Job</td><td/></tr><tr><td>+model: KgeModel</td><td/></tr><tr><td>+loss: Loss</td><td/></tr><tr><td>+optimizer: Optimizer</td><td/></tr><tr><td>+valid_job: EvaluationJob</td><td/></tr><tr><td>+...</td><td/></tr><tr><td>KgeModel</td><td/></tr><tr><td>+scorer: RelationalScorer</td><td/></tr><tr><td>+entity_embedder: KgeEmbedder</td><td/></tr><tr><td>+relation_embedder: KgeEmbedder</td><td/></tr><tr><td>+...</td><td/></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"text": "Comparing LIBKGE and other OSS that provide functionality around training KGE models for link prediction. All assessments have been made at the end of May 2020. Frameworks denotes focus on fostering KGE research with modularization, extensibility and coverage of relevant models and training methods. Large Scale denotes focus on extremely large-scale graphs, with support of training in multi-node or multi-gpu mode, or both.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"text": "", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>: The reported best performances (on the project's homepage or the related publication as of May 2020) for ComplEx on FB15K-237 for each project. The performances have been obtained with different amount of effort for hyperparameter optimization and should not be compared directly. Reported ranking met-rics: Mean Reciprocal Rank (MRR) and HITS@10.</td></tr></table>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |