|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:59:38.868035Z" |
|
}, |
|
"title": "Inductively Representing Out-of-Knowledge-Graph Entities by Optimal Estimation Under Translational Assumptions", |
|
"authors": [ |
|
{ |
|
"first": "Damai", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Lab of Computational Linguistics (MOE)", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Lab of Computational Linguistics (MOE)", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Fuli", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Lab of Computational Linguistics (MOE)", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Pengcheng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Lab of Computational Linguistics (MOE)", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Baobao", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Lab of Computational Linguistics (MOE)", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zhifang", |
|
"middle": [], |
|
"last": "Sui", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Lab of Computational Linguistics (MOE)", |
|
"institution": "Peking University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Conventional Knowledge Graph Completion (KGC) assumes that all test entities appear during training. However, in real-world scenarios, Knowledge Graphs (KG) evolve fast with outof-knowledge-graph (OOKG) entities added frequently, and we need to efficiently represent these entities. Most existing Knowledge Graph Embedding (KGE) methods cannot represent OOKG entities without costly retraining on the whole KG. To enhance efficiency, we propose a simple and effective method that inductively represents OOKG entities by their optimal estimation under translational assumptions. Moreover, given pretrained embeddings of the in-knowledge-graph (IKG) entities, our method even needs no additional learning. Experimental results on two KGC tasks with OOKG entities show that our method outperforms the previous methods by a large margin with higher efficiency. 1 * Equal contribution.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Conventional Knowledge Graph Completion (KGC) assumes that all test entities appear during training. However, in real-world scenarios, Knowledge Graphs (KG) evolve fast with outof-knowledge-graph (OOKG) entities added frequently, and we need to efficiently represent these entities. Most existing Knowledge Graph Embedding (KGE) methods cannot represent OOKG entities without costly retraining on the whole KG. To enhance efficiency, we propose a simple and effective method that inductively represents OOKG entities by their optimal estimation under translational assumptions. Moreover, given pretrained embeddings of the in-knowledge-graph (IKG) entities, our method even needs no additional learning. Experimental results on two KGC tasks with OOKG entities show that our method outperforms the previous methods by a large margin with higher efficiency. 1 * Equal contribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Knowledge Graphs (KG) play a pivotal role in various NLP tasks, but generally suffer from incompleteness. To address this problem, Knowledge Graph Completion (KGC) aims to predict missing relations in a KG based on Knowledge Graph Embeddings (KGE). Transductive KGE methods, such as TransE (Bordes et al., 2013) and RotatE (Sun et al., 2019) , achieve success in conventional KGC, which assumes that all test entities appear during training. However, in real-world scenarios, KGs evolve fast with out-of-knowledge-graph (OOKG) entities added frequently. To represent these emerging OOKG entities, transductive KGE methods need to retrain on the whole KG frequently, which Figure 1 : An example of KGC with OOKG entities. When an OOKG entity \"TENET\" is added, we can represent it efficiently via information of its IKG neighbors to predict its missing relations with other entities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 311, |
|
"text": "(Bordes et al., 2013)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 341, |
|
"text": "(Sun et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 672, |
|
"end": 680, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "is extremely time-consuming. Faced with this problem, we are in urgent need of an efficient method to tackle KGC with OOKG entities. Figure 1 shows an example of KGC with OOKG entities. Based on an existing KG, a new movie \"TENET\" is added as an OOKG entity with some auxiliary relations that connect it with some inknowledge-graph (IKG) entities. To predict the missing relations between \"TENET\" and other entities, we need to obtain its embedding first. Being aware that \"TENET\" is directed by \"Christopher Nolan\", is an \"action\" movie, and is starred by \"John David Washington\", we can combine these clues to profile \"TENET\" and estimate its embedding. This embedding can then be used to predict whether its relation with \"English\" is \"language\".", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 141, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In recent years, some inductive methods have been proposed for OOKG entities without retraining. Hamaguchi et al. (2017) ; Wang et al. (2019) ; Bi et al. (2020) ; Zhao et al. (2020) adopt Graph Neural Networks (GNN) to aggregate the IKG neighbors to represent the OOKG entities. These methods are effective but require relatively complex calculations, which could be simplified for higher efficiency. Xie et al. (2016 Xie et al. ( , 2017 ; Shi and Weninger (2018) utilize external resources such as entity descriptions or images to enrich the OOKG entity embedding, thus avoiding retraining. How-ever, high-quality external resources are expensive to acquire, which may limit the feasibility.", |
|
"cite_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 120, |
|
"text": "Hamaguchi et al. (2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 141, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 160, |
|
"text": "Bi et al. (2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 181, |
|
"text": "Zhao et al. (2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 417, |
|
"text": "Xie et al. (2016", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 437, |
|
"text": "Xie et al. ( , 2017", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 440, |
|
"end": 463, |
|
"text": "Shi and Weninger (2018)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose an inductive method that derives formulas from translational assumptions to estimate OOKG entity embeddings. Compared to existing methods for KGC with OOKG entities, our method has simpler calculations and does not need external resources. For a triplet (h, r, t), translational assumptions of translational distance KGE models suppose that embedding h can establish a connection with t via an r-specific operation. Assuming that h is an OOKG entity and t is an IKG entity, we show that if a translational assumption can derive a specific formula to compute h via pretrained t and r, then there will be no other candidate for h that better fits this translational assumption. Therefore, the computed h is the optimal estimation of the OOKG entity under this translational assumption. Among existing typical KGE models, we discover that translational assumptions of TransE and RotatE can derive these specific estimation formulas. Therefore, based on them, we design two instances of our method called InvTransE and InvRotatE, respectively. Note that our estimation formulas have no trainable parameters, so our method needs no additional learning when given pretrained IKG embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions are summarized as follows: (1) We propose a simple and effective method to inductively represent OOKG entities by their optimal estimation under translational assumptions. (2) Our method needs no external resources. Given pretrained IKG embeddings, our method even needs no additional learning. (3) We evaluate our method on two KGC tasks with OOKG entities. Experimental results show that our method outperforms the state-of-the-art methods by a large margin with higher efficiency, and maintains a robust performance even with higher OOKG entity ratios.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Let E denote the IKG entity set and R denote the relation set. K train is the training set where all entities are IKG. K aux is the auxiliary set connecting OOKG and IKG entities during inference, where each triplet contains an OOKG and an IKG entity. We define the K-neighbor set of an entity e as all its neighbor entities and relations in K: N K (e) = {(r, t)|(e, r, t) \u2208 K} \u222a {(h, r)|(h, r, e) \u2208 K}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology 2.1 Notations and problem formulation", |
|
"sec_num": "2" |
|
}, |
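{

"text": "To make the notations concrete, here is a minimal Python sketch (ours, not part of the paper) that builds the K-neighbor sets N K (e) from (head, relation, tail) triplets; the direction tag is an implementation convenience so that later code knows whether e appears as the head or the tail:\n\nfrom collections import defaultdict\n\ndef build_neighbor_sets(triplets):\n    # N_K(e) = {(r, t) | (e, r, t) in K} \u222a {(h, r) | (h, r, e) in K}\n    neighbors = defaultdict(set)\n    for h, r, t in triplets:\n        neighbors[h].add(('out', r, t))  # e = h, neighbor pair (r, t)\n        neighbors[t].add(('in', h, r))   # e = t, neighbor pair (h, r)\n    return neighbors",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methodology 2.1 Notations and problem formulation",

"sec_num": "2"

},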
|
{ |
|
"text": "Using notations above, we formulate our prob- lem as follows: Given K aux and IKG embeddings pretrained on K train , we need to represent an OOKG entity e \u2208 E as an embedding. This embedding can then be used to tackle KGC with OOKG entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology 2.1 Notations and problem formulation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As shown in Figure 2 , our proposed method is composed of an estimator and a reducer. The estimator aims to compute a set of candidate embeddings for an OOKG entity via its IKG neighbor information. The reducer aims to reduce these candidates to the final embedding of the OOKG entity.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed method", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "For an OOKG entity e, given its IKG neighbors N Kaux (e) with pretrained embeddings, the estimator aims to compute a set of candidate embeddings. Except TransE and RotatE, other typical KGE models have relatively complex calculations in their translational assumptions. These complex calculations prevent their translational assumptions from deriving specific estimation formulas for OOKG entities. 2 Therefore, we design two sets of estimation formulas based on TransE and RotatE, respectively. To be specific, if e is the head entity, we can obtain its optimal estimation e by the following formulas:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Estimator", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "e = t \u2212 r, for InvTransE, t \u2022 r \u22121 , for InvRotatE,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Estimator", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "where \u2022 denotes the element-wise product, r \u22121 denotes the element-wise inversion. Otherwise, if e is the tail entity, we can obtain its optimal estimation e by the following formulas:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Estimator", |
|
"sec_num": "2.2.1" |
|
}, |
|
{

"text": "\u00ea = h + r, for InvTransE; \u00ea = h \u2022 r, for InvRotatE.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Estimator",

"sec_num": "2.2.1"

},
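{

"text": "The following is a minimal sketch of the estimator under these formulas, assuming pretrained embeddings stored as dicts of NumPy arrays: real-valued for InvTransE, and complex-valued for InvRotatE with unit-modulus relation elements, so that the element-wise inversion r^{-1} is the complex conjugate. The function name and the neighbor format come from the hypothetical build_neighbor_sets sketch above:\n\nimport numpy as np\n\ndef estimate_candidate(neighbor, ent_emb, rel_emb, model='InvTransE'):\n    if neighbor[0] == 'out':                        # OOKG entity is the head\n        _, r, t = neighbor\n        if model == 'InvTransE':\n            return ent_emb[t] - rel_emb[r]          # \u00ea = t - r\n        return ent_emb[t] * np.conj(rel_emb[r])     # \u00ea = t \u2022 r^{-1}\n    _, h, r = neighbor                              # OOKG entity is the tail\n    if model == 'InvTransE':\n        return ent_emb[h] + rel_emb[r]              # \u00ea = h + r\n    return ent_emb[h] * rel_emb[r]                  # \u00ea = h \u2022 r",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Estimator",

"sec_num": "2.2.1"

},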
|
{ |
|
"text": "After the estimator computes |N Kaux (e)| candidate embeddings, the reducer aims to reduce them to the final embedding of the OOKG entity by weighted average. We design two weighting functions. Correlation-based weights are query-aware. Inspired by Wang et al. (2019) , we first use the conditional probability to model the correlation between two relations:", |
|
"cite_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 267, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reducer", |
|
"sec_num": "2.2.2" |
|
}, |
|
{

"text": "P(r_2 | r_1) = \u03a3_{e \u2208 E} 1(r_1, r_2 \u2208 N_{K_train}(e)) / \u03a3_{e \u2208 E} 1(r_1 \u2208 N_{K_train}(e)).",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Reducer",

"sec_num": "2.2.2"

},
|
{ |
|
"text": "When the query relation r q is specified, we assign more weight to the candidate that is computed via a more relevant relation to r q :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reducer", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "w corr ( e) = P (r e |r q ) + P (r q |r e ) s Z corr ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reducer", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "where Z corr is the normalization factor, r e is the neighbor relation via which e is computed, s is a hyper-parameter set to 4.0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reducer", |
|
"sec_num": "2.2.2" |
|
}, |
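{

"text": "A minimal sketch (ours) of estimating P(r_2 | r_1) by the indicator sums above, reusing the output of the hypothetical build_neighbor_sets over K train:\n\nfrom collections import defaultdict\n\ndef relation_cond_prob(neighbor_sets):\n    # entities_with[r] = set of entities e whose N_K_train(e) contains r\n    entities_with = defaultdict(set)\n    for e, nbrs in neighbor_sets.items():\n        for nbr in nbrs:\n            r = nbr[1] if nbr[0] == 'out' else nbr[2]\n            entities_with[r].add(e)\n    def p(r2, r1):\n        # fraction of entities containing r1 that also contain r2\n        return len(entities_with[r1] & entities_with[r2]) / max(len(entities_with[r1]), 1)\n    return p",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Reducer",

"sec_num": "2.2.2"

},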
|
{ |
|
"text": "Degree-based weights focus more on the entity with higher degree in the training set:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reducer", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "w deg ( e) = log (d e + \u03b4) Z deg ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reducer", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "where Z deg is the normalization factor, d e is the degree of the neighbor entity via which e is computed, \u03b4 is a smoothing factor set to 0.1. Based on these weighting functions, the final embedding of the OOKG entity e is computed by e = e\u2208C e \u2022 w corr/deg ( e),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reducer", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "where C denotes the candidate embedding set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reducer", |
|
"sec_num": "2.2.2" |
|
}, |
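{

"text": "A minimal sketch (ours) of the reducer, building on the hypothetical helpers above; the weighting functions return unnormalized scores, and dividing by their sum plays the role of the normalization factors Z_corr and Z_deg:\n\nimport numpy as np\n\ndef corr_weight(p, r_cand, r_query, s=4.0):\n    # (P(r_\u00ea|r_q) + P(r_q|r_\u00ea))^s before normalization\n    return (p(r_cand, r_query) + p(r_query, r_cand)) ** s\n\ndef deg_weight(degree, delta=0.1):\n    # log(d_\u00ea + delta) before normalization\n    return np.log(degree + delta)\n\ndef reduce_candidates(candidates, raw_weights):\n    w = np.asarray(raw_weights, dtype=float)\n    w = w / w.sum()  # normalization\n    return sum(wi * ci for wi, ci in zip(w, candidates))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Reducer",

"sec_num": "2.2.2"

},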
|
{ |
|
"text": "We conduct experiments on two KGC tasks with OOKG entities: link prediction and triplet classification. For link prediction, we use two datasets released by Wang et al. (2019) built based on FB15k (Bordes et al., 2013) : FB15k-Head-10 and FB15k-Tail-10. For triplet classification, we use nine datasets released by Hamaguchi et al. (2017) built based on WN11 (Socher et al., 2013) : WN11-Head-1000, WN11-Head-3000, WN11-Head-5000, WN11-Tail-1000, WN11-Tail-3000, WN11-Tail-5000, WN11-Both-1000, WN11-Both-3000, and WN11-Both-5000. Each of the datasets mentioned above is composed of four sets: a training set, an auxiliary set, a validation set, and a test set. Each triplet in the training and validation sets contains only IKG entities. Each triplet in the auxiliary set contains an OOKG entity and an IKG entity. Each triplet in the test set contains at least one OOKG entity. The dataset statistics are shown in Table 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 175, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 218, |
|
"text": "(Bordes et al., 2013)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 315, |
|
"end": 338, |
|
"text": "Hamaguchi et al. (2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 380, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 916, |
|
"end": 923, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tasks and datasets", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We tune pretraining hyper-parameters on the validation set. We use Adam (Kingma and Ba, 2015) with an initial learning rate of 10 \u22123 as the optimizer and a batch size of 1,024. For link prediction, we use 1,000-dimensional embeddings and the correlationbased weights. For triplet classification, we use 300-dimensional embeddings and the degree-based weights. Details are included in Appendix B.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental settings", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "For link prediction, we compare our method with three strong GNN-based baselines. GNN-MEAN (Hamaguchi et al., 2017 ) uses a mean function to aggregate neighbors. GNN-LSTM adopts LSTM for aggregation. LAN (Wang et al., 2019) adopts both rule-and network-based attention mechanisms for aggregation. For triplet classification, we compare with two more competitive GNN-based baselines. ConvLayer (Bi et al., 2020) uses convolutional layers as the transition function. FCLEntity (Zhao et al., 2020) uses fullyconnected networks as the transition function with an attention-based aggregation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 114, |
|
"text": "(Hamaguchi et al., 2017", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 223, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 410, |
|
"text": "(Bi et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 475, |
|
"end": 494, |
|
"text": "(Zhao et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "For link prediction, we use Mean Reciprocal Rank (MRR) and the proportion of ground truth entities ranked in top-k (Hits@k, k \u2208 {1, 10}). All the metrics are filtered versions that exclude false negative candidates. For triplet classification, we use Accuracy. We determine relation-specific thresholds \u03b4 r by maximizing the accuracy on the validation set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation metrics", |
|
"sec_num": "3.4" |
|
}, |
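{

"text": "A minimal sketch (ours) of the filtered ranking metrics, assuming higher candidate scores are better and known_true holds the indices of all entities known to correctly complete the query, so that false negative candidates are excluded from the ranking:\n\nimport numpy as np\n\ndef filtered_rank(scores, target, known_true):\n    better = scores > scores[target]\n    better[list(known_true)] = False  # filter out false negative candidates\n    return 1 + int(better.sum())\n\ndef summarize(ranks, ks=(1, 10)):\n    ranks = np.asarray(ranks, dtype=float)\n    out = {'MRR': float((1.0 / ranks).mean())}\n    for k in ks:\n        out['Hits@%d' % k] = float((ranks <= k).mean())\n    return out",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation metrics",

"sec_num": "3.4"

},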
|
{ |
|
"text": "Evaluation results of link prediction are shown in FB15k-Head-10 108,854 11,339 249,798 2,811 1,170 10,336 2,082 FB15k-Tail-10 99,783 10,190 261,341 2,987 1,126 10,603 1,934 WN11-Head-1000 108,197 4,561 1,938 955 11 37,700 340 WN11-Head-3000 99,963 4,068 5,311 2,686 11 36,646 985 WN11-Head-5000 92,309 3,688 8,048 4,252 11 35, Table 2 : Evaluation results (MRR, Hits@k) of link prediction. Bold is the best. Underline is the second best. diction results. This again validates the effect of our method.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
|
{ |
|
"start": 367, |
|
"end": 374, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main results", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "How does our method perform with increasing OOKG entity ratios? We compare the triplet classification results of InvTransE, LAN, and GNN-MEAN with increasing OOKG entity ratios in Figure 3. We find that, when the OOKG entity ratio increases, the performance of our method drops the slowest. This suggests that our method is more robust to increasing OOKG entity ratios. Table 4 , the performance without our weighting functions drops dramatically. This verifies the effectiveness of our weighting functions. How does the number of neighbors impact the performance? We randomly select up to k \u2208 {32, 8, 1} IKG neighbors to use. As shown in Table 4, as the number of used neighbors decreases, the performance drops. This suggests that using more neighbors can lead to better performance. Moreover, we find that InvTransE can outperform previous methods using only up to 32 neighbors.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 186, |
|
"text": "Figure", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 377, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "3.6" |
|
}, |
|
{ |
|
"text": "Transductive KGE methods map entities and relations to embeddings, and then use score functions to measure the triplet salience. TransE (Bordes et al., 2013) pioneers translational distance methods and is widely-used. It derives a series of methods, such as TransH (Wang et al., 2014) , TransR (Lin et al., 2015) , and RotatE (Sun et al., 2019) . Besides, semantic matching methods form another mainstream (Nickel et al., 2011; Trouillon et al., 2016; Nickel et al., 2016; Balazevic et al., 2019) . These transductive KGE methods achieve success in conventional KGC, but fail to directly represent OOKG entities efficiently. To improve efficiency, some inductive methods adopt GNN to aggregate IKG neighbors to produce embeddings for OOKG entities (Hamaguchi et al., 2017; Wang et al., 2019; Bi et al., 2020; Zhao et al., 2020) . These methods are effective but need relatively complex calculations. Other inductive methods incorporate external resources to enrich embeddings and represent OOKG entities via only external resources (Xie et al., 2016; Shi and Weninger, 2018; Xie et al., 2017) . However, high-quality external resources are hard and expensive to acquire, which may limit the feasibility.", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 284, |
|
"text": "(Wang et al., 2014)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 312, |
|
"text": "(Lin et al., 2015)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 344, |
|
"text": "(Sun et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 427, |
|
"text": "(Nickel et al., 2011;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 428, |
|
"end": 451, |
|
"text": "Trouillon et al., 2016;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 472, |
|
"text": "Nickel et al., 2016;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 473, |
|
"end": 496, |
|
"text": "Balazevic et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 748, |
|
"end": 772, |
|
"text": "(Hamaguchi et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 773, |
|
"end": 791, |
|
"text": "Wang et al., 2019;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 792, |
|
"end": 808, |
|
"text": "Bi et al., 2020;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 809, |
|
"end": 827, |
|
"text": "Zhao et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1032, |
|
"end": 1050, |
|
"text": "(Xie et al., 2016;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1051, |
|
"end": 1074, |
|
"text": "Shi and Weninger, 2018;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1075, |
|
"end": 1092, |
|
"text": "Xie et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "This paper aims to efficiently represent OOKG entities. We propose a simple and effective method that inductively represents OOKG entities by their optimal estimation under translational assumptions. Moreover, given pretrained IKG embeddings, our method needs no additional learning. Evaluations on two KGC tasks show that our method outperforms the state-of-the-art methods by a large margin with higher efficiency, and maintains a robust performance with higher OOKG entity ratios.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Detailed proof is included in Appendix A.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This paper is supported by the National Key Research and Development Program of China (2020AAA0106700) and NSFC project (U19A2065).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Can Derive Specific Estimation Formulas for OOKG Entities?For a triplet (h, r, t), translational assumptions of KGE models suppose that h can establish a connection with t via an r-specific operation, which can be formulated by the following equation:where F r (\u2022) is an r-specific function that is determined by the specific KGE model. Without loss of generality, we may assume that h is an OOKG entity and t is an IKG entity. Under a translational assumption, we can obtain a specific estimation formula for h if and only if (1) we regard h as unknown, and its solution in Equation 1 exists,the solution is unique. If the above two conditions hold, the unique solution of h is the optimal estimation under the translational assumption, since no other candidate for h can better fit Equation 1. In the following parts, we analyze translational assumptions of four KGE models (TransE, RotatE, TransH, TransR) as examples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 876, |
|
"end": 908, |
|
"text": "(TransE, RotatE, TransH, TransR)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Which Translational Assumptions", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For TransE, its translational assumption is formulated byIn this case, we can obtain a unique solution of h by the following steps:This computed h is the optimal estimation under the translational assumption.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 TransE", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For RotatE, its translational assumption is formulated byIn this case, we can obtain a unique solution of h by the following steps:This computed h is the optimal estimation under the translational assumption.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 RotatE", |
|
"sec_num": null |
|
}, |
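{

"text": "A quick numerical check (ours) that the element-wise inversion recovers h exactly under this assumption, using the fact that r^{-1} equals the complex conjugate of r when every element of r has unit modulus:\n\nimport numpy as np\n\nrng = np.random.default_rng(0)\nh = rng.normal(size=8) + 1j * rng.normal(size=8)\nr = np.exp(1j * rng.uniform(0, 2 * np.pi, size=8))  # |r_i| = 1\nt = h * r                                           # RotatE assumption\nassert np.allclose(t * np.conj(r), h)               # h = t \u2022 r^{-1}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "A.2 RotatE",

"sec_num": null

},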
|
{ |
|
"text": "For TransH, its translational assumption is formulated bywhere w r is the unit normal vector of the plane P that r lies on. From the translational assumption, we can derive the following equations: : Hyper-parameters for two categories of datasets. We use the same hyper-parameters for two FB15k-based datasets and the same hyper-parameters for nine WN11-based datasets. On each dataset, we use the same hyperparameters for two pretrained models. d denotes the embedding dimension. \u03b3 denotes the margin. \u03b1 denotes the sampling temperature. n denotes the negative sampling size. L2 denotes the parameter of L2 regularization, where N/A means no regularization.From a geometric perspective, h \u2212 w r hw r is the projection of h on the plane P . From the translational assumption, we can only deduce that the projection of h is equal to v. However, there exist infinitely many possible h that can satisfy this condition. Therefore, the solution of h is not unique, and we cannot obtain a specific estimation formula from the translational assumption of TransH.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.3 TransH", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For TransR, its translational assumption is formulated bywhere M r is an r-specific matrix. From the translational assumption, we can derive the following equations:In this case, we derive a system of linear equations from the translational assumption. In this system, there exists a unique solution for h if and only if the rank of the coefficient matrix M r is equal to the rank of the augmented matrix [M r ; v]. However, M r is automatically learned by TransR without this restriction. Therefore, we cannot guarantee that there exists a unique solution for h, and we cannot obtain a specific estimation formula from the translational assumption of TransR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 TransR", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To pretrain the TransE and RotatE models, we adopt the self-adversarial negative sampling loss proposed by Sun et al. (2019) in consideration of its good performance on training TransE and Ro-tatE. The self-adversarial negative sampling loss L is formulated as:where \u03c3 is the sigmoid function, \u03b3 is the margin, n is the negative sampling size and (h i , r, t i ) is the i-th negative sample triplet. D (\u2022) is the distance function. D (h, r, t) is equal to h + r \u2212 t 1/2 for TransE and is equal to h \u2022 r \u2212 t 1/2 for RotatE. p is the self-adversarial weight function which gives more weight to the high-scored negative samples:where \u03b1 is a hyper-parameter called sampling temperature to be tuned. F(\u2022) is the score function that is equal to \u2212D(\u2022). We conduct each experiment on a single Nvidia Geforce GTX-1080Ti GPU and tune hyperparameters on the validation set. Generally, we set the batch size to 1,024 and use Adam (Kingma and Ba, 2015) with an initial learning rate of 10 \u22123 as the optimizer. We choose the correlation-based weights for link prediction and choose the degreebased weights with a smoothing factor of 0.1 for triplet classification. Other hyper-parameters are shown in Table 5 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 124, |
|
"text": "Sun et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1187, |
|
"end": 1194, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Details of Experimental Settings", |
|
"sec_num": null |
|
} |
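,

{

"text": "A minimal PyTorch sketch (ours) of this loss, assuming pos_dist is D(h, r, t) for the positive triplet and neg_dist is a length-n tensor of negative-sample distances; the weight p is detached so that it acts only as a constant weighting:\n\nimport torch\nimport torch.nn.functional as F\n\ndef self_adversarial_loss(pos_dist, neg_dist, gamma, alpha):\n    pos_term = -F.logsigmoid(gamma - pos_dist)\n    p = torch.softmax(alpha * (-neg_dist), dim=0).detach()  # score F = -D\n    neg_term = -(p * F.logsigmoid(neg_dist - gamma)).sum()\n    return pos_term + neg_term",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "B Details of Experimental Settings",

"sec_num": null

}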
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "TuckER: Tensor factorization for knowledge graph completion", |
|
"authors": [ |
|
{ |
|
"first": "Ivana", |
|
"middle": [], |
|
"last": "Balazevic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carl", |
|
"middle": [], |
|
"last": "Allen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Hospedales", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "EMNLP-IJCNLP 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5184--5193", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ivana Balazevic, Carl Allen, and Timothy M. Hospedales. 2019. TuckER: Tensor factorization for knowledge graph completion. In EMNLP-IJCNLP 2019, pages 5184-5193.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Knowledge transfer for out-ofknowledge-base entities: Improving graph-neuralnetwork-based embedding using convolutional layers", |
|
"authors": [ |
|
{ |
|
"first": "Zhongqin", |
|
"middle": [], |
|
"last": "Bi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianchen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ping", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongbin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Access", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "159039--159049", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhongqin Bi, Tianchen Zhang, Ping Zhou, and Yong- bin Li. 2020. Knowledge transfer for out-of- knowledge-base entities: Improving graph-neural- network-based embedding using convolutional lay- ers. IEEE Access, 8:159039-159049.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Translating embeddings for modeling multirelational data", |
|
"authors": [ |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Usunier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Garcia-Duran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oksana", |
|
"middle": [], |
|
"last": "Yakhnenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "NeurIPS 2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2787--2795", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antoine Bordes, Nicolas Usunier, Alberto Garcia- Duran, Jason Weston, and Oksana Yakhnenko. 2013. Translating embeddings for modeling multi- relational data. In NeurIPS 2013, pages 2787-2795.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Knowledge transfer for out-of-knowledge-base entities: A graph neural network approach", |
|
"authors": [ |
|
{ |
|
"first": "Takuo", |
|
"middle": [], |
|
"last": "Hamaguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hidekazu", |
|
"middle": [], |
|
"last": "Oiwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masashi", |
|
"middle": [], |
|
"last": "Shimbo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuji", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1802--1808", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takuo Hamaguchi, Hidekazu Oiwa, Masashi Shimbo, and Yuji Matsumoto. 2017. Knowledge transfer for out-of-knowledge-base entities: A graph neural net- work approach. In IJCAI 2017, pages 1802-1808.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{

"first": "Diederik",

"middle": [

"P"

],

"last": "Kingma",

"suffix": ""

},

{

"first": "Jimmy",

"middle": [],

"last": "Ba",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In ICLR 2015.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Learning entity and relation embeddings for knowledge graph completion", |
|
"authors": [ |
|
{ |
|
"first": "Yankai", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "AAAI 2015", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2181--2187", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yankai Lin, Zhiyuan Liu, Maosong Sun, Yang Liu, and Xuan Zhu. 2015. Learning entity and relation em- beddings for knowledge graph completion. In AAAI 2015, pages 2181-2187.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Holographic embeddings of knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Nickel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenzo", |
|
"middle": [], |
|
"last": "Rosasco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomaso", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Poggio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "AAAI 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1955--1961", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maximilian Nickel, Lorenzo Rosasco, and Tomaso A. Poggio. 2016. Holographic embeddings of knowl- edge graphs. In AAAI 2016, pages 1955-1961.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A three-way model for collective learning on multi-relational data", |
|
"authors": [ |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Nickel", |
|
"suffix": "" |
|
}, |
|
{

"first": "Volker",

"middle": [],

"last": "Tresp",

"suffix": ""

},

{

"first": "Hans-Peter",

"middle": [],

"last": "Kriegel",

"suffix": ""

}
|
], |
|
"year": 2011, |
|
"venue": "ICML 2011", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "809--816", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maximilian Nickel, Volker Tresp, and Hans-Peter Kriegel. 2011. A three-way model for collective learning on multi-relational data. In ICML 2011, pages 809-816.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Open-world knowledge graph completion", |
|
"authors": [ |
|
{ |
|
"first": "Baoxu", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Weninger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "AAAI 2018", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1957--1964", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baoxu Shi and Tim Weninger. 2018. Open-world knowledge graph completion. In AAAI 2018, pages 1957-1964.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Reasoning with neural tensor networks for knowledge base completion", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{

"first": "Christopher",

"middle": [

"D"

],

"last": "Manning",

"suffix": ""

},

{

"first": "Andrew",

"middle": [],

"last": "Ng",

"suffix": ""

}
|
], |
|
"year": 2013, |
|
"venue": "NeurIPS 2013", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "926--934", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Socher, Danqi Chen, Christopher D Manning, and Andrew Ng. 2013. Reasoning with neural ten- sor networks for knowledge base completion. In NeurIPS 2013, pages 926-934.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "RotatE: Knowledge graph embedding by relational rotation in complex space", |
|
"authors": [ |
|
{ |
|
"first": "Zhiqing", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi-Hong", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian-Yun", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiqing Sun, Zhi-Hong Deng, Jian-Yun Nie, and Jian Tang. 2019. RotatE: Knowledge graph embedding by relational rotation in complex space. In ICLR 2019.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "ComplEx embeddings for simple link prediction", |
|
"authors": [ |
|
{ |
|
"first": "Th\u00e9o", |
|
"middle": [], |
|
"last": "Trouillon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Welbl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c9ric", |
|
"middle": [], |
|
"last": "Gaussier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Bouchard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ICML 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2071--2080", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Th\u00e9o Trouillon, Johannes Welbl, Sebastian Riedel,\u00c9ric Gaussier, and Guillaume Bouchard. 2016. ComplEx embeddings for simple link prediction. In ICML 2016, pages 2071-2080.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Logic attention based neighborhood aggregation for inductive knowledge graph embedding", |
|
"authors": [ |
|
{ |
|
"first": "Peifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jialong", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenliang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rong", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "AAAI 2019", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7152--7159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peifeng Wang, Jialong Han, Chenliang Li, and Rong Pan. 2019. Logic attention based neighborhood ag- gregation for inductive knowledge graph embedding. In AAAI 2019, pages 7152-7159.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Knowledge graph embedding by translating on hyperplanes", |
|
"authors": [ |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianwen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianlin", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "AAAI 2014", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1112--1119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhen Wang, Jianwen Zhang, Jianlin Feng, and Zheng Chen. 2014. Knowledge graph embedding by trans- lating on hyperplanes. In AAAI 2014, pages 1112- 1119.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Representation learning of knowledge graphs with entity descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Ruobing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jia", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huanbo", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "AAAI 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2659--2665", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruobing Xie, Zhiyuan Liu, Jia Jia, Huanbo Luan, and Maosong Sun. 2016. Representation learning of knowledge graphs with entity descriptions. In AAAI 2016, pages 2659-2665.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Image-embodied knowledge representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Ruobing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huanbo", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3140--3146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruobing Xie, Zhiyuan Liu, Huanbo Luan, and Maosong Sun. 2017. Image-embodied knowledge representation learning. In IJCAI 2017, pages 3140- 3146.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Embedding entities and relations for learning and inference in knowledge bases", |
|
"authors": [ |
|
{ |
|
"first": "Bishan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bishan Yang, Wen-tau Yih, Xiaodong He, Jianfeng Gao, and Li Deng. 2015. Embedding entities and relations for learning and inference in knowledge bases. In ICLR 2015.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Attention-based aggregation graph networks for knowledge graph information transfer", |
|
"authors": [ |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weijia", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yusheng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "PAKDD 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "542--554", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ming Zhao, Weijia Jia, and Yusheng Huang. 2020. Attention-based aggregation graph networks for knowledge graph information transfer. In PAKDD 2020, pages 542-554.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "An illustration of our method, which consists of an estimator and a reducer.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Results with increasing OOKG entity ratios.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "From the table, we observe that: (1) With the optimal estimation under translational assumptions, both instances of our method significantly outperform all baselines. (2) Neighbors are unordered, so order-insensitive methods like ours or LAN perform better, while GNN-LSTM that captures ordered information performs worse. For triplet classification, we show the results in Table 3. The table shows that our method achieves the best performance, consistent with the link pre-", |
|
"num": null, |
|
"content": "<table><tr><td>Dataset</td><td>|Ktrain|</td><td>|Kvalid|</td><td>|Kaux|</td><td>|Ktest|</td><td>|R|</td><td>|E|</td><td>|E |</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"text": "Statistics of datasets with OOKG entities. These datasets are built based on FB15k or WN11 and named in the form of \"Base-Pos-Num\". Base denotes the based datasets. Pos denotes the position of OOKG entities in test triplets. Num distinguishes different numbers of OOKG entities represented by |E |.", |
|
"num": null, |
|
"content": "<table><tr><td>Method</td><td>FB15k-Head-10 MRR H@10 H@1 MRR H@10 H@1 FB15k-Tail-10</td></tr><tr><td colspan=\"2\">GNN-LSTM 0.254 42.9 16.2 0.219 37.3 14.3</td></tr><tr><td colspan=\"2\">GNN-MEAN 0.310 48.0 22.2 0.251 41.0 17.1</td></tr><tr><td>LAN</td><td>0.394 56.6 30.2 0.314 48.2 22.7</td></tr><tr><td colspan=\"2\">InvTransE 0.462 60.4 38.5 0.357 48.7 29.0</td></tr><tr><td colspan=\"2\">InvRotatE 0.453 60.4 36.9 0.362 49.1 29.3</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"text": "Evaluation results (Accuracy) of triplet classification. Bold is the best. Underline is the second best. The results of all five baselines are taken from their original papers.", |
|
"num": null, |
|
"content": "<table><tr><td>Method</td><td>MRR</td><td>H@10</td><td>H@1</td></tr><tr><td>InvTransE (Full)</td><td>0.462</td><td>60.4</td><td>38.5</td></tr><tr><td>Uniform Weights</td><td>0.361</td><td>52.0</td><td>28.1</td></tr><tr><td>Up to 32 Neighbors</td><td>0.447</td><td>59.2</td><td>37.2</td></tr><tr><td>Up to 8 Neighbors</td><td>0.386</td><td>52.0</td><td>31.3</td></tr><tr><td>Only 1 Neighbor</td><td>0.246</td><td>37.9</td><td>18.1</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"text": "Ablation experiment results for InvTransE on the FB15k-Head-10 dataset of link prediction.", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |