|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:58:10.004077Z" |
|
}, |
|
"title": "Modeling Graph Structure via Relative Position for Text Generation from Knowledge Graphs", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Schmitt", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "LMU Munich", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Leonardo", |
|
"middle": [ |
|
"F R" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Research Training Group AIPHES and UKP Lab", |
|
"institution": "Technische Universit\u00e4t Darmstadt", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Dufter", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "LMU Munich", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Research Training Group AIPHES and UKP Lab", |
|
"institution": "Technische Universit\u00e4t Darmstadt", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "LMU Munich", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present Graformer, a novel Transformerbased encoder-decoder architecture for graphto-text generation. With our novel graph selfattention, the encoding of a node relies on all nodes in the input graph-not only direct neighbors-facilitating the detection of global patterns. We represent the relation between two nodes as the length of the shortest path between them. Graformer learns to weight these nodenode relations differently for different attention heads, thus virtually learning differently connected views of the input graph. We evaluate Graformer on two popular graph-to-text generation benchmarks, AGENDA and WebNLG, where it achieves strong performance while using many fewer parameters than other approaches. 1", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present Graformer, a novel Transformerbased encoder-decoder architecture for graphto-text generation. With our novel graph selfattention, the encoding of a node relies on all nodes in the input graph-not only direct neighbors-facilitating the detection of global patterns. We represent the relation between two nodes as the length of the shortest path between them. Graformer learns to weight these nodenode relations differently for different attention heads, thus virtually learning differently connected views of the input graph. We evaluate Graformer on two popular graph-to-text generation benchmarks, AGENDA and WebNLG, where it achieves strong performance while using many fewer parameters than other approaches. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "A knowledge graph (KG) is a flexible data structure commonly used to store both general world knowledge (Auer et al., 2008) and specialized information, e.g., in biomedicine (Wishart et al., 2018) and computer vision (Krishna et al., 2017) . Generating a natural language description of such a graph (KG\u2192text) makes the stored information accessible to a broader audience of end users. It is therefore important for KG-based question answering (Bhowmik and de Melo, 2018) , datato-document generation (Moryossef et al., 2019; Koncel-Kedziorski et al., 2019) and interpretability of KGs in general (Schmitt et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 123, |
|
"text": "(Auer et al., 2008)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 174, |
|
"end": 196, |
|
"text": "(Wishart et al., 2018)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 239, |
|
"text": "(Krishna et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 471, |
|
"text": "(Bhowmik and de Melo, 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 501, |
|
"end": 525, |
|
"text": "(Moryossef et al., 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 557, |
|
"text": "Koncel-Kedziorski et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 597, |
|
"end": 619, |
|
"text": "(Schmitt et al., 2020)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recent approaches to KG\u2192text employ encoderdecoder architectures: the encoder first computes vector representations of the graph's nodes, the decoder then uses them to predict the text sequence. Typical encoder choices are graph neural networks based on message passing between direct neighbors in the graph (Kipf and Welling, 2017; Veli\u010dkovi\u0107 et al., 2018) or variants of Transformer (Vaswani et al., 2017) that apply self-attention on all nodes together, including those that are not directly connected. To avoid losing information, the latter approaches use edge or node labels from the shortest path when computing the attention between two nodes (Zhu et al., 2019; Cai and Lam, 2020) . Assuming the existence of a path between any two nodes is particularly problematic for KGs: a set of KG facts often does not form a connected graph.", |
|
"cite_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 332, |
|
"text": "(Kipf and Welling, 2017;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 357, |
|
"text": "Veli\u010dkovi\u0107 et al., 2018)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 385, |
|
"end": 407, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 669, |
|
"text": "(Zhu et al., 2019;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 670, |
|
"end": 688, |
|
"text": "Cai and Lam, 2020)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose a flexible alternative that neither needs such an assumption nor uses label information to model graph structure: a Transformerbased encoder that interprets the lengths of shortest paths in a graph as relative position information and thus, by means of multi-head attention, dynamically learns different structural views of the input graph with differently weighted connection patterns. We call this new architecture Graformer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Following previous work, we evaluate Graformer on two benchmarks: (i) the AGENDA dataset (Koncel-Kedziorski et al., 2019) , i.e., the generation of scientific abstracts from automatically extracted entities and relations specific to scientific text, and (ii) the WebNLG challenge dataset (Gardent et al., 2017) , i.e., the task of generating text from DBPedia subgraphs. On both datasets, Graformer achieves more than 96% of the state-of-the-art performance while using only about half as many parameters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 121, |
|
"text": "(Koncel-Kedziorski et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 310, |
|
"text": "(Gardent et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In summary, our contributions are as follows: (1) We develop Graformer, a novel graph-to-text architecture that interprets shortest path lengths as relative position information in a graph self-attention network. (2) Graformer achieves competitive performance on two popular KG-to-text generation benchmarks, showing that our architecture can learn about graph structure without any guidance other than its text generation objective. (3) To further investigate what Graformer learns about graph structure, we visualize the differently connected graph views it has learned and indeed find different attention heads for more local and more global graph information. Interestingly, direct neighbors are considered particularly important even without any structural bias, such as introduced by a graph neural network. (4) Analyzing the performance w.r.t. different input graph properties, we find evidence that Graformer's more elaborate global view on the graph is an advantage when it is important to distinguish between distant but connected nodes and truly unreachable ones.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most recent approaches to graph-to-text generation employ a graph neural network (GNN) based on message passing through the input graph's topology as the encoder in their encoder-decoder architectures (Marcheggiani and Perez-Beltrachini, 2018; Koncel-Kedziorski et al., 2019; Ribeiro et al., 2019; Guo et al., 2019) . As one layer of these encoders only considers immediate neighbors, a large number of stacked layers can be necessary to learn about distant nodes, which in turn also increases the risk of propagating noise .", |
|
"cite_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 243, |
|
"text": "(Marcheggiani and Perez-Beltrachini, 2018;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 275, |
|
"text": "Koncel-Kedziorski et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 297, |
|
"text": "Ribeiro et al., 2019;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 315, |
|
"text": "Guo et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Other approaches (Zhu et al., 2019; Cai and Lam, 2020) base their encoder on the Transformer architecture (Vaswani et al., 2017) and thus, in each layer, compute self-attention on all nodes, not only direct neighbors, facilitating the information flow between distant nodes. Like Graformer, these approaches incorporate information about the graph topology with some variant of relative position embeddings (Shaw et al., 2018) . They, however, assume that there is always a path between any pair of nodes, i.e., there are no unreachable nodes or disconnected subgraphs. Thus they use an LSTM (Hochreiter and Schmidhuber, 1997) to compute a relation embedding from the labels along this path. However, in contrast to the AMR 2 graphs used for their evaluation, KGs are frequently disconnected. Graformer is more flexible and makes no assumption about connectivity. Furthermore, its relative position embeddings only depend on the lengths of shortest paths i.e., purely structural information, not labels. It thus effectively learns differently connected views of its input graph.", |
|
"cite_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 35, |
|
"text": "(Zhu et al., 2019;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 36, |
|
"end": 54, |
|
"text": "Cai and Lam, 2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 106, |
|
"end": 128, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 407, |
|
"end": 426, |
|
"text": "(Shaw et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 592, |
|
"end": 626, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Deficiencies in modeling long-range dependencies in GNNs have been considered a serious limitation before. Various solutions orthogonal to our approach have been proposed in recent work: By 2 abstract meaning representation incorporating a connectivity score into their graph attention network, manage to increase the attention span to k-hop neighborhoods but, finally, only experiment with k = 2. Our graph encoder efficiently handles dependencies between much more distant nodes. Pei et al. (2020) define an additional neighborhood based on Euclidean distance in a continuous node embedding space. Similar to our work, a node can thus receive information from distant nodes, given their embeddings are close enough. However, Pei et al. (2020) compute these embeddings only once before training whereas in our approach node similarity is based on the learned representation in each encoder layer. This allows Graformer to dynamically change node interaction patterns during training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 482, |
|
"end": 499, |
|
"text": "Pei et al. (2020)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 727, |
|
"end": 744, |
|
"text": "Pei et al. (2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Recently, Ribeiro et al. (2020) use two GNN encoders -one using the original topology and one with a fully connected version of the graph -and combine their output in various ways for graph-totext generation. This approach can only see two extreme versions of the graph: direct neighbors and full connection. Our approach is more flexible and dynamically learns a different structural view per attention head. It is also more parameter-efficient as our multi-view encoder does not need a separate set of parameters for each view.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Graformer follows the general multi-layer encoderdecoder pattern known from the original Transformer (Vaswani et al., 2017) . In the following, we first describe our formalization of the KG input and then how it is processed by Graformer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 123, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Graformer Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Knowledge graph. We formalize a knowledge graph (KG) as a directed, labeled multigraph", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "G KG = (V, A, s, t, l V , l A , E, R) with V", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "a set of vertices (the KG entities), A a set of arcs (the KG facts), s, t : A \u2192 V functions assigning to each arc its source/target node (the subject/object of a KG fact), and l V : V \u2192 E, l A : A \u2192 R providing labels for vertices and arcs, where R is a set of KG-specific relations and E a set of entity names. Token graph. Entity names usually consist of more than one token or subword unit. Hence, a tokenizer tok : E \u2192 \u03a3 * T is needed that splits an entity's label into its components from the vocabulary \u03a3 T of text tokens. Following recent work (Ribeiro et al., 2020), we mimic this composition- ality of node labels in the graph structure by splitting each node into as many nodes as there are tokens in its label. We thus obtain a directed hypergraph", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "V T A +1 \u22121 +1 \u22121 +2 \u22122 +1 \u22121", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "G T = (V T , A, s T , t T , l T , l A , \u03a3 T , R, same),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where s T , t T : A \u2192 P (V T ) now assign a set of source (resp. target) nodes to each (hyper-) arc and all nodes are labeled with only one token, i.e., l T : V T \u2192 \u03a3 T . Unlike Ribeiro et al. 2020, we additionally keep track of all token nodes' origins: same : V T \u2192 P (V T \u00d7 Z) assigns to each node n all other nodes n stemming from the same entity together with the relative position of l T (n) and l T (n ) in the original tokenized entity name. Fig. 1b shows the token graph corresponding to the KG in Fig. 1a . Incidence graph. For ease of implementation, our final data structure for the KG is the hypergraph's incidence graph, a bipartite graph where hyper-arcs are represented as nodes and edges are unlabeled:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 450, |
|
"end": 457, |
|
"text": "Fig. 1b", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 514, |
|
"text": "Fig. 1a", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "G = (N, E, l, \u03a3, { SAME p | p \u2208 Z }) where N = V T \u222a A is the set of nodes, E = { (n 1 , n 2 ) | n 1 \u2208 s T (n 2 ) \u2228 n 2 \u2208 t T (n 1 ) } the set of directed edges, l : N \u2192 \u03a3 a label function,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "and \u03a3 = \u03a3 T \u222a R the vocabulary. We introduce SAME p edges to fully connect same clusters:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "SAME p = { (n 1 , n 2 ) | (n 2 , p) \u2208 same(n 1 ) }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where p differentiates between different relative positions in the original entity string, similar to (Shaw et al., 2018) . See Fig. 1c for an example.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 121, |
|
"text": "(Shaw et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 135, |
|
"text": "Fig. 1c", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Graph data structure", |
|
"sec_num": "3.1" |
|
}, |
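To make the data structure of this section concrete, here is a minimal Python sketch of how a token-level incidence graph with SAME_p edges could be built from (subject, relation, object) triples. The whitespace tokenizer, node naming scheme, and returned dictionary layout are illustrative assumptions, not the authors' implementation.

```python
from collections import defaultdict

def build_incidence_graph(triples, tokenize=lambda label: label.split()):
    """Sketch: build a bipartite incidence graph from KG triples.

    Token nodes carry one token each; every fact becomes an arc node
    labeled with its relation; SAME_p edges fully connect tokens that
    stem from the same entity (p = relative position in the label).
    """
    labels = {}                 # node id -> label
    edges = set()               # directed edges (token->arc or arc->token)
    same = defaultdict(list)    # node id -> [(other node id, p), ...]
    entity_tokens = {}          # entity name -> its token node ids

    def add_entity(name):
        if name not in entity_tokens:
            ids = []
            for tok in tokenize(name):
                node = len(labels)
                labels[node] = tok
                ids.append(node)
            # fully connect the cluster with SAME_p edges
            for i, n1 in enumerate(ids):
                for j, n2 in enumerate(ids):
                    if i != j:
                        same[n1].append((n2, j - i))
            entity_tokens[name] = ids
        return entity_tokens[name]

    for subj, rel, obj in triples:
        src, tgt = add_entity(subj), add_entity(obj)
        arc = len(labels)
        labels[arc] = rel        # the fact becomes its own (arc) node
        for n in src:
            edges.add((n, arc))  # subject tokens -> arc node
        for n in tgt:
            edges.add((arc, n))  # arc node -> object tokens
    return {"labels": labels, "edges": edges, "same": dict(same)}

if __name__ == "__main__":
    g = build_incidence_graph([("SVD", "used-for", "word2vec"),
                               ("SVD", "used-for", "embedding learning")])
    print(len(g["labels"]), "nodes,", len(g["edges"]), "edges")
```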
|
{ |
|
"text": "The initial graph representation H (0) \u2208 R |N |\u00d7d is obtained by looking up embeddings for the node labels in the learned embedding matrix E \u2208 R |\u03a3|\u00d7d , i.e., H", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(0) i = e l(n i ) E where e l(n i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "is the one-hotencoding of the ith node's label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To compute the node representation H (L) in the Lth layer, we follow Vaswani et al. (2017) , i.e., we first normalize the input from the previous layer H (L\u22121) via layer normalization LN , followed by multi-head graph self-attention SelfAtt g (see \u00a7 3.3 for details), which -after dropout regularization Dr and a residual connection -yields the intermediate representation I (cf. Eq. (1)). A feedforward layer FF with one hidden layer and GeLU (Hendrycks and Gimpel, 2016) activation computes the final layer output (cf. Eq. (2)). As recommended by Chen et al. 2018, we apply an additional layer normalization step to the output", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 90, |
|
"text": "Vaswani et al. (2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 472, |
|
"text": "(Hendrycks and Gimpel, 2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "H (L E ) of the last encoder layer L E . I (L) = Dr (SelfAtt g (LN (H (L\u22121) ))) + H (L\u22121)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "H (L) = Dr (FF (LN (I (L) ))) + I (L)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
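A minimal numpy sketch of the pre-norm residual block in Eqs. (1)-(2) follows, treating dropout as identity (as at inference time) and taking the graph self-attention of Sec. 3.3 as an abstract function; parameter shapes and the toy attention stub are assumptions for illustration.

```python
import numpy as np

def layer_norm(x, eps=1e-5):
    # LN over the feature dimension
    mu = x.mean(-1, keepdims=True)
    var = x.var(-1, keepdims=True)
    return (x - mu) / np.sqrt(var + eps)

def gelu(x):
    # tanh approximation of GeLU (Hendrycks and Gimpel, 2016)
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3)))

def encoder_layer(H_prev, self_att, W1, b1, W2, b2):
    """Pre-norm block of Eqs. (1)-(2); dropout is treated as identity here."""
    I = self_att(layer_norm(H_prev)) + H_prev        # Eq. (1)
    ff = gelu(layer_norm(I) @ W1 + b1) @ W2 + b2     # FF with one hidden layer
    return ff + I                                    # Eq. (2)

if __name__ == "__main__":
    N, d, d_ff = 5, 8, 16
    rng = np.random.default_rng(0)
    H = rng.normal(size=(N, d))
    avg_att = lambda X: np.tile(X.mean(0), (N, 1))   # attention stub: average over nodes
    out = encoder_layer(H, avg_att,
                        rng.normal(size=(d, d_ff)), np.zeros(d_ff),
                        rng.normal(size=(d_ff, d)), np.zeros(d))
    print(out.shape)  # (5, 8)
```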
|
{ |
|
"text": "SelfAtt g computes a weighted sum of H (L\u22121) :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "SelfAtt g (H) i = |N | j=1 \u03b1 g ij (H j W Vg )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where W Vg \u2208 R d\u00d7d is a learned parameter matrix.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In the next section, we derive the definition of the graph-structure-informed attention weights \u03b1 g ij .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In this section, we describe the computation of attention weights for multi-head self-attention. Note that the formulas describe the computations for one head. The output of multiple heads is combined as in the original Transformer (Vaswani et al., 2017) . Text self-attention. Shaw et al. (2018) introduced position-aware self-attention in the Transformer by (i) adding a relative position embedding A K \u2208 R M \u00d7M \u00d7d to X's key representation, when computing the softmax-normalized attention scores \u03b1 i between X i \u2208 R d and the complete input embedding matrix X \u2208 R M \u00d7d (cf. Eq. (4)), and (ii) adding a second type of position embedding A V \u2208 R M \u00d7M \u00d7d to X's value representation when computing the weighted sum (cf. Eq. 5):", |
|
"cite_spans": [ |
|
{ |
|
"start": 232, |
|
"end": 254, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 296, |
|
"text": "Shaw et al. (2018)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1 i = \u03c3 X i W Q (XW K + A K i ) \u221a d (4) V i = n j=1 \u03b1 ij (X j W V + A V ij )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where \u03c3 (\u2022) denotes the softmax function, i.e.,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u03c3 (b) i = exp (b i ) J j=1 exp (b j ) , for b \u2208 R J .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Recent work (Raffel et al., 2019) has adopted a simplified form where value-modifying embeddings A V are omitted and key-modifying embeddings A K are replaced with learned scalar embeddings S \u2208 R M \u00d7M that -based on relative position -directly in-or decrease attention scores before normalization, i.e., Eq. (4) becomes Eq. (6).", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 33, |
|
"text": "(Raffel et al., 2019)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1 i = \u03c3 X i W Q (XW K ) \u221a d + S i", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
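For illustration, here is a minimal numpy sketch of one attention head with a learned scalar bias added to the pre-softmax scores, as in Eq. (6); with S = gamma(R) built from relative graph positions, the same computation gives the graph self-attention of Eq. (7). Shapes and the random stand-in parameters are assumptions.

```python
import numpy as np

def softmax(x, axis=-1):
    z = x - x.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

def biased_self_attention(X, W_Q, W_K, W_V, S):
    """One head of self-attention with a scalar position bias S (Eq. (6)).

    X: (M, d) inputs; S: (M, M) learned scalars indexed by relative position.
    With S = gamma(R) over graph positions this corresponds to Eq. (7).
    """
    d = X.shape[-1]
    scores = (X @ W_Q) @ (X @ W_K).T / np.sqrt(d) + S
    alpha = softmax(scores, axis=-1)
    return alpha @ (X @ W_V)

if __name__ == "__main__":
    M, d = 4, 8
    rng = np.random.default_rng(1)
    X = rng.normal(size=(M, d))
    W = lambda: rng.normal(size=(d, d)) / np.sqrt(d)
    S = rng.normal(size=(M, M))  # stands in for gamma(R) or a text position bias
    print(biased_self_attention(X, W(), W(), W(), S).shape)  # (4, 8)
```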
|
{ |
|
"text": "Shaw et al. (2018) share their position embeddings across attention heads but learn separate embeddings for each layer as word representations from different layers can vary a lot. Raffel et al. (2019) learn separate S matrices for each attention head but share them across layers. We use Raffel et al. (2019) 's form of relative position encoding for text self-attention in our decoder ( \u00a7 3.4). Graph self-attention.", |
|
"cite_spans": [ |
|
{ |
|
"start": 181, |
|
"end": 201, |
|
"text": "Raffel et al. (2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 289, |
|
"end": 309, |
|
"text": "Raffel et al. (2019)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Analogously to selfattention on text, we define our structural graph self-attention as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "VT A s v d w e l c u1 u2 s 0 4 5 2 2 2 1 1 3 v -4 0 4 2 2 2 1 1 3 d -5 -4 0 2 2 2 1 1 3 w -2 -2 -2 0 2 2 -1 \u221e 1 e -2 -2 -2 -2 0 4 -3 -1 -1 l -2 -2 -2 -2 -4 0 -3 -1 -1 c -1 -1 -1 1 3 3 0 \u221e 2 u1 -1 -1 -1 \u221e 1 1 \u221e 0 \u221e u2 -3 -3 -3 -1 1 1 -2 \u221e 0", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u03b1 g i = \u03c3 H i W Qg (HW Kg ) \u221a d + \u03b3(R) i (7)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "W Kg , W Qg \u2208 R d\u00d7d are learned matrices and \u03b3 : Z\u222a{\u221e} \u2192 R looks up learned scalar embeddings for the relative graph positions in R \u2208 R N \u00d7N . We define the relative graph position R ij between the nodes n i and n j with respect to two factors: (i) the text relative position p in the original entity name if n i and n j stem from the same original entity, i.e., (n i , n j ) \u2208 SAME p for some p and (ii) shortest path lengths otherwise:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "R ij = \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 \u221e, if \u03b4(n i , n j ) = \u221e and \u03b4(n j , n i ) = \u221e encode(p), if (n i , n j ) \u2208 SAME p \u03b4(n i , n j ), if \u03b4(n i , n j ) \u2264 \u03b4(n j , n i ) \u2212\u03b4(n j , n i ), if \u03b4(n i , n j ) > \u03b4(n j , n i ) (8) where \u03b4(n i , n j )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "is the length of the shortest path from n i to n j , which we define to be \u221e if and only if there is no such path. encode maps a text relative position p \u2208 Z \\ {0} to an integer outside \u03b4's range to avoid clashes. Concretely, we use encode(p) := sgn(p) \u2022 \u03b4 max + p where \u03b4 max is the maximum graph diameter, i.e., the maximum value of \u03b4 over all graphs under consideration. Thus, we model graph relative position as the length of the shortest path using either only forward edges (R ij > 0) or only backward edges (R ij < 0). Additionally, two special cases are considered: (i) Nodes without any purely forward or purely backward path between them (R ij = \u221e) and (ii) token nodes from the same entity. Here the relative position in the original entity string p is encoded outside the range of path length encodings (which are always in the interval [\u2212\u03b4 max , \u03b4 max ]).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In practice, we use two thresholds, n \u03b4 and n p . All values of \u03b4 exceeding n \u03b4 are set to n \u03b4 and analogously for p. This limits the number of different positions a model can distinguish. Intuition. Our definition of relative position in graphs combines several advantages: (i) Any node can attend to any other node -even unreachable ones -while learning a suitable attention bias for different distances. (ii) SAME p edges are treated differently in the attention mechanism. Thus, entity representations can be learned like in a regular transformer encoder, given that tokens from the same entity are fully connected with SAME p edges with p providing relative position information. (iii) The lengths of shortest paths often have an intuitively useful interpretation in our incidence graphs and the sign of the entries in R also captures the important distinction between incoming and outgoing paths. In this way, Graformer can, e.g., capture the difference between the subject and object of a fact, which is expressed as a relative position of \u22121 vs. 1. The subject and object nodes, in turn, see each other as 2 and \u22122, respectively. Fig. 2 shows the R matrix corresponding to the graph from Fig. 1c . Note how token nodes from the same entity, e.g., s, v, and d, form clusters as they have the same distances to other nodes, and how the relations inside such a cluster are encoded outside the interval [\u22123, 3], i.e., the range of shortest path lengths. It is also insightful to compare node pairs with the same value in R. E.g., both s and w see e at a distance of 2 because the entities SVD and word2vec are both the subject of a fact with embedding learning as the object. Likewise, s sees both c and u1 at a distance of 1 because its entity SVD is subject to both corresponding facts.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1138, |
|
"end": 1144, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 1196, |
|
"end": 1203, |
|
"text": "Fig. 1c", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Self-attention for text and graphs with relative position embeddings", |
|
"sec_num": "3.3" |
|
}, |
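The sketch below illustrates how the matrix R of Eq. (8) could be computed with breadth-first search, including encode(p) and the clipping thresholds n_delta and n_p; the adjacency-list representation, the tie-breaking for SAME_p clusters, and the helper names are assumptions rather than the authors' code.

```python
from collections import deque
import math

def shortest_path_lengths(adj, src):
    """BFS over directed edges; unreachable nodes get math.inf."""
    dist = {n: math.inf for n in adj}
    dist[src] = 0
    queue = deque([src])
    while queue:
        u = queue.popleft()
        for v in adj[u]:
            if dist[v] == math.inf:
                dist[v] = dist[u] + 1
                queue.append(v)
    return dist

def relative_positions(adj, same, delta_max, n_delta, n_p):
    """Sketch of Eq. (8) with the clipping described in Sec. 3.3."""
    nodes = sorted(adj)
    delta = {n: shortest_path_lengths(adj, n) for n in nodes}
    R = {}
    for i in nodes:
        for j in nodes:
            d_ij, d_ji = delta[i][j], delta[j][i]
            p = dict(same.get(i, [])).get(j)   # SAME_p relative position, if any
            if p is not None:
                p = max(-n_p, min(n_p, p))
                R[i, j] = int(math.copysign(1, p)) * delta_max + p  # encode(p)
            elif d_ij == math.inf and d_ji == math.inf:
                R[i, j] = math.inf             # mutually unreachable
            elif d_ij <= d_ji:
                R[i, j] = min(d_ij, n_delta)   # forward path
            else:
                R[i, j] = -min(d_ji, n_delta)  # backward path
    return R

if __name__ == "__main__":
    adj = {0: [2], 1: [2], 2: [3], 3: []}      # token/arc nodes with directed edges
    same = {0: [(1, 1)], 1: [(0, -1)]}         # nodes 0 and 1 share an entity
    print(relative_positions(adj, same, delta_max=3, n_delta=3, n_p=2))
```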
|
{ |
|
"text": "Our decoder follows closely the standard Transformer decoder (Vaswani et al., 2017) , except for the modifications suggested by Chen et al. (2018) . Hidden decoder representation. The initial decoder representation Z (0) \u2208 R M \u00d7d embeds the (partially generated) target text T \u2208 R M \u00d7|\u03a3| , i.e., Z (0) = T E. A decoder layer L then obtains a contextualized representation via self-attention as in the encoder ( \u00a7 3.2):", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 83, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 128, |
|
"end": 146, |
|
"text": "Chen et al. (2018)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "C (L) = Dr (SelfAtt t (LN (Z (L\u22121) ))) + Z (L\u22121)", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "SelfAtt t differs from SelfAtt g by using different position embeddings in Eq. (7) and, obviously, R ij is defined in the usual way for text. C (L) is then modified via multi-head attention MHA on the output H (L E ) of the last graph encoder layer L E . As in \u00a7 3.2, we make use of residual connections, layer normalization LN , and dropout Dr :", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 147, |
|
"text": "(L)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "U (L) = Dr (MHA(LN (C (L) ), H (L E ) )) + C (L)", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Z (L) = Dr (FF (LN (U (L) ))) + U (L)", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "MHA(C, H) i = |N | j=1 \u03b1 ij (H j W Vt )", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1 i = \u03c3 C i W Qt (HW Kt ) \u221a d", |
|
"eq_num": "(13)" |
|
} |
|
], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Generation probabilities. The final representation Z (L D ) of the last decoder layer L D is used to compute the probability distribution P i \u2208 [0, 1] |\u03a3| over all words in the vocabulary \u03a3 at time step i:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P i = \u03c3 Z (L D ) i E", |
|
"eq_num": "(14)" |
|
} |
|
], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Note that E \u2208 R |\u03a3|\u00d7d is the same matrix that is also used to embed node labels and text tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graformer decoder", |
|
"sec_num": "3.4" |
|
}, |
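A minimal sketch of the generation probabilities in Eq. (14) with the tied embedding matrix E; the transpose follows from the stated dimensions of E and is otherwise an assumption of this illustration.

```python
import numpy as np

def softmax(x):
    z = x - x.max(-1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(-1, keepdims=True)

def generation_probabilities(Z_last, E):
    """Eq. (14): P_i = softmax(Z_i E^T) with the shared embedding matrix E.

    Z_last: (M, d) final decoder states; E: (|Sigma|, d) embedding matrix.
    Returns an (M, |Sigma|) matrix of next-token distributions.
    """
    return softmax(Z_last @ E.T)

if __name__ == "__main__":
    rng = np.random.default_rng(2)
    Z, E = rng.normal(size=(3, 8)), rng.normal(size=(100, 8))
    P = generation_probabilities(Z, E)
    print(P.shape, float(P[0].sum()))  # (3, 100) 1.0
```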
|
{ |
|
"text": "We train Graformer by minimizing the standard negative log-likelihood loss based on the likelihood estimations described in the previous section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "We evaluate our new architecture on two popular benchmarks for KG-to-text generation, AGENDA (Koncel-Kedziorski et al., 2019) and WebNLG (Gardent et al., 2017) . While the latter contains crowd-sourced texts corresponding to subgraphs from various DBPedia categories, the former was automatically created by applying an information extraction tool (Luan et al., 2018 ) on a corpus of scientific abstracts (Ammar et al., 2018) . As this process is noisy, we corrected 7 train instances where an entity name was erroneously split on a special character and, for the same reason, deleted 1 train instance entirely. Otherwise, we use the data as is, including the train/dev/test split. We list the number of instances per data split, as well as general statistics about the graphs in Ta in AGENDA were automatically extracted. This leads to a higher number of disconnected graph components. Nearly all WebNLG graphs consist of a single component, i.e., are connected graphs, whereas for AGENDA this is practically never the case. We also report statistics that depend on the tokenization (cf. \u00a7 4.2) as factors like the length of target texts and the percentage of tokens shared verbatim between input graph and target text largely impact the task difficulty.", |
|
"cite_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 159, |
|
"text": "(Gardent et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 366, |
|
"text": "(Luan et al., 2018", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 425, |
|
"text": "(Ammar et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 780, |
|
"end": 782, |
|
"text": "Ta", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Following previous work on AGENDA (Ribeiro et al., 2020) , we put the paper title into the graph as another entity. In contrast to Ribeiro et al. (2020), we also link every node from a real entity to every node from the title by TITLE2TXT and TXT2TITLE edges. The type information provided by AGENDA is, as usual for KGs, expressed with one dedicated node per type and HAS-TYPE arcs that link entities to their types. We keep the original pretokenized texts but lowercase the title as both node labels and target texts are also lowercased. For WebNLG, we follow previous work (Gardent et al., 2017) by replacing underscores in entity names with whitespace and breaking apart camelcased relations. We furthermore follow the evaluation protocol of the original challenge by converting all characters to lowercased ASCII and separating all punctuation from alphanumeric characters during tokenization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 56, |
|
"text": "(Ribeiro et al., 2020)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data preprocessing", |
|
"sec_num": "4.2" |
|
}, |
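A sketch of the WebNLG-style label normalization described above (underscores to whitespace, camel-cased relations split, lowercased ASCII, punctuation separated from alphanumeric characters); the regular expressions are illustrative assumptions, not the exact challenge scripts.

```python
import re
import unicodedata

def split_camel_case(relation):
    # e.g. "floorCount" -> "floor count"
    return re.sub(r"(?<=[a-z0-9])(?=[A-Z])", " ", relation).lower()

def to_ascii_lower(text):
    # lowercase, strip non-ASCII, and separate punctuation from alphanumerics
    text = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode()
    text = re.sub(r"([^\w\s])", r" \1 ", text.lower())
    return re.sub(r"\s+", " ", text).strip()

def normalize_entity(name):
    # underscores to whitespace, e.g. "Los_Angeles" -> "los angeles"
    return to_ascii_lower(name.replace("_", " "))

if __name__ == "__main__":
    print(normalize_entity("Los_Angeles_Herald-Examiner"))  # los angeles herald - examiner
    print(split_camel_case("floorCount"))                   # floor count
```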
|
{ |
|
"text": "For both datasets, we train a BPE vocabulary using sentencepiece (Kudo and Richardson, 2018) on the train set, i.e., a concatenation of node labels and target texts. See Table 1 for vocabulary sizes. Note that for AGENDA, only 99.99% of the characters found in the train set are added to the vocabulary. This excludes exotic Unicode characters that occur in certain abstracts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 92, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 177, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data preprocessing", |
|
"sec_num": "4.2" |
|
}, |
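The BPE vocabulary could be trained with sentencepiece roughly as sketched below; the input file path and vocabulary size are placeholders (the real sizes are in Table 1), and character_coverage=0.9999 reflects the 99.99% figure mentioned for AGENDA.

```python
import sentencepiece as spm

# train a BPE model on the concatenation of node labels and target texts;
# "train_labels_and_targets.txt" and vocab_size are placeholder values
spm.SentencePieceTrainer.train(
    input="train_labels_and_targets.txt",
    model_prefix="agenda_bpe",
    vocab_size=16000,
    model_type="bpe",
    character_coverage=0.9999,  # excludes exotic Unicode characters from rare abstracts
)

sp = spm.SentencePieceProcessor(model_file="agenda_bpe.model")
print(sp.encode("graph-to-text generation", out_type=str))
```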
|
{ |
|
"text": "We prepend entity and relation labels with dedicated E and R tags.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data preprocessing", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We train Graformer with the Adafactor optimizer (Shazeer and Stern, 2018) for 40 epochs on AGENDA and 200 epochs on WebNLG. We report test results for the model yielding the best validation performance measured in corpus-level BLEU (Papineni et al., 2002) . For model selection, we decode greedily. The final results are generated by beam search. Following Ribeiro et al. 2020, we couple beam search with a length penalty (Wu et al., 2016) of 5.0. See Appendix A for more details and a full list of hyperparameters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 73, |
|
"text": "(Shazeer and Stern, 2018)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 255, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 439, |
|
"text": "(Wu et al., 2016)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hyperparameters and training details", |
|
"sec_num": "4.3" |
|
}, |
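The length penalty of 5.0 refers to the formulation of Wu et al. (2016); a minimal sketch of the length-normalized beam scoring, assuming the GNMT-style penalty, is shown below.

```python
def length_penalty(length, alpha=5.0):
    # GNMT-style penalty from Wu et al. (2016): lp(Y) = ((5 + |Y|) / 6) ** alpha
    return ((5.0 + length) / 6.0) ** alpha

def normalized_score(log_prob_sum, length, alpha=5.0):
    # beam hypotheses are ranked by their length-normalized log-probability
    return log_prob_sum / length_penalty(length, alpha)

if __name__ == "__main__":
    # a longer hypothesis with a lower total log-probability can still rank higher
    print(normalized_score(-12.0, 10), normalized_score(-13.0, 14))
```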
|
{ |
|
"text": "We apply a data loading scheme inspired by the bucketing approach of Koncel-Kedziorski et al. (2019) and length-based curriculum learning (Platanios et al., 2019): We sort the train set by target text length and split it into four buckets of two times 40% and two times 10% of the data. After each training epoch, the buckets are shuffled internally but their global order stays the same from shorter target texts to longer ones. This reduces padding during batching as texts of similar lengths stay together and introduces a mini-curriculum from presumably easier examples (i.e., shorter targets) to more difficult ones for each epoch. This enables us to successfully train Graformer even without a learning rate schedule. Table 2 shows the results of our evaluation on AGENDA in terms of BLEU (Papineni et al., 2002) , METEOR (Banerjee and Lavie, 2005) , and CHRF++ (Popovi\u0107, 2017) . Like the models we compare with, we report the average and standard deviation of 4 runs with different random seeds.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 100, |
|
"text": "Koncel-Kedziorski et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 795, |
|
"end": 818, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 828, |
|
"end": 854, |
|
"text": "(Banerjee and Lavie, 2005)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 868, |
|
"end": 883, |
|
"text": "(Popovi\u0107, 2017)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 724, |
|
"end": 731, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Epoch curriculum", |
|
"sec_num": "4.4" |
|
}, |
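A sketch of the bucketed epoch curriculum described above: the train set is sorted by target length, split into buckets of 40%, 40%, 10%, and 10%, and only shuffled within each bucket per epoch; the example data format and the iteration interface are assumptions.

```python
import random

def epoch_order(examples, target_len, fractions=(0.4, 0.4, 0.1, 0.1), seed=0):
    """Yield one epoch of examples: buckets ordered short -> long,
    shuffled only internally (epoch curriculum of Sec. 4.4)."""
    rng = random.Random(seed)
    ordered = sorted(examples, key=target_len)
    buckets, start = [], 0
    for frac in fractions:
        end = start + int(round(frac * len(ordered)))
        buckets.append(ordered[start:end])
        start = end
    buckets[-1].extend(ordered[start:])  # leftovers from rounding
    for bucket in buckets:
        rng.shuffle(bucket)
        yield from bucket

if __name__ == "__main__":
    data = [{"target": "a " * n} for n in range(1, 21)]
    order = list(epoch_order(data, target_len=lambda ex: len(ex["target"].split())))
    print([len(ex["target"].split()) for ex in order])  # shorter targets come first
```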
|
{ |
|
"text": "Our model outperforms previous Transformerbased models that only consider first-order neighborhoods per encoder layer (Koncel-Kedziorski et al., 2019; recent models by Ribeiro et al. (2020), Graformer performs very similarly. Using both a local and a global graph encoder, Ribeiro et al. (2020) combine information from very distant nodes but at the same time need extra parameters for the second encoder. Graformer is more efficient and still matches their best model's BLEU and METEOR scores within a standard deviation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 150, |
|
"text": "(Koncel-Kedziorski et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overall performance", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The results on the test set of seen categories of WebNLG (Table 3) look similar. Graformer outperforms most original challenge participants and more recent work. While not performing on par with CGE-LW on WebNLG, Graformer still achieves more than 96% of its performance while using only about half as many parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 66, |
|
"text": "(Table 3)", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Overall performance", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We investigate whether Graformer is more suitable for disconnected graphs by comparing its performance on different splits of the AGENDA test set according to two graph properties: (i) the average number of nodes per connected component (\u00b5 c ) and (ii) the largest diameter across all of a graph's ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Performance on different types of graphs", |
|
"sec_num": "5.2" |
|
}, |
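The two statistics could be computed as sketched below, assuming that components are the weakly connected components of the directed graph and that the diameter is taken on each component's undirected view; networkx is used purely for illustration.

```python
import networkx as nx

def graph_split_features(edges):
    """Return (mu_c, d): average nodes per connected component and
    the largest diameter over all components (Sec. 5.2)."""
    G = nx.DiGraph(edges)
    components = list(nx.weakly_connected_components(G))
    mu_c = G.number_of_nodes() / len(components)
    diameters = [nx.diameter(G.subgraph(c).to_undirected()) for c in components]
    return mu_c, max(diameters)

if __name__ == "__main__":
    # two components: a path of three nodes and an isolated edge
    print(graph_split_features([(0, 1), (1, 2), (3, 4)]))  # (2.5, 2)
```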
|
{ |
|
"text": "We can see in Table 4 that the performance of both Graformer and CGE-LW (Ribeiro et al., 2020) increases with more graph structure (larger \u00b5 c and d), i.e., more information leads to more accurate texts. Besides, Graformer outperforms CGE-LW on BLEU for graphs with smaller components (0 < \u00b5 c < 1.5) and smaller diameters (d < 3). Although METEOR and CHRF++ scores always favor CGE-LW, the performance difference is also smaller for cases where BLEU favors Graformer.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 21, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "components (d).", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We conjecture that Graformer benefits from its more elaborate global view, i.e., its ability to distinguish between distant but connected nodes and unreachable ones. CGE-LW's global encoder cannot make this distinction because it only sees a fully connected version of the graph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "components (d).", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Curiously, Graformer's BLEU is also better for larger components (\u00b5 c \u2265 2.0). With multiple larger components, Graformer might also better distinguish nodes that are part of the same component from those that belong to a different one.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "components (d).", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Only for 1.5 < \u00b5 c < 2.0, CGE-LW clearly outperforms Graformer in all metrics. It seems that Graformer is most helpful for extreme cases, i.e., when either most components are isolated nodes or when isolated nodes are the exception. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "components (d).", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In a small ablation study, we examine the impact of beam search, length penalty, and our new epoch curriculum training. We find that beam search and length penalty do contribute to the overall performance but to a relatively small extent. Training with our new epoch curriculum, however, proves crucial for good performance. Platanios et al. (2019) argue that curriculum learning can replace a learning rate schedule, which is usually essential to train a Transformer model. Indeed we successfully optimize Graformer without any learning rate schedule, when applying the epoch curriculum.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation study", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We visualize the learned attention bias \u03b3 for different relative graph positions R ij (cf. \u00a7 3.3; esp. Eq. (7)) after training on AGENDA and WebNLG in Fig. 3 . The eight attention heads (x-axis) have learned different weights for each graph position R ij (y-axis). Note that AGENDA has more possible R ij values because n \u03b4 = 6 whereas we set n \u03b4 = 4 for WebNLG.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 157, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learned graph structure", |
|
"sec_num": null |
|
}, |
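Such a heads-by-positions heatmap could be produced as sketched below; the bias matrix here is random stand-in data, not the learned weights from Fig. 3.

```python
import numpy as np
import matplotlib.pyplot as plt

# stand-in for the learned scalar bias gamma(R): relative positions x 8 heads
positions = list(range(-6, 7)) + ["inf"]          # n_delta = 6 as for AGENDA
bias = np.random.default_rng(3).normal(size=(len(positions), 8))

fig, ax = plt.subplots(figsize=(4, 6))
im = ax.imshow(bias, aspect="auto", cmap="coolwarm")
ax.set_xlabel("attention head")
ax.set_ylabel("relative graph position R_ij")
ax.set_yticks(range(len(positions)))
ax.set_yticklabels([str(p) for p in positions])
fig.colorbar(im, ax=ax)
fig.savefig("graph_position_bias.png")
```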
|
{ |
|
"text": "For both datasets, we notice that one attention head primarily focuses on global information (5 for AGENDA, 4 for WebNLG). AGENDA even dedicates head 6 entirely to unreachable nodes, showing the importance of such nodes for this dataset. In contrast, most WebNLG heads suppress information from unreachable nodes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learned graph structure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For both datasets, we also observe that nearer nodes generally receive a high weight (focus on local information): In Fig. 3b , e.g., head 2 concentrates solely on direct incoming edges and head 0 on direct outgoing ones. Graformer can learn empirically based on its task where direct neighbors are most important and where they are not, showing that the strong bias from graph neural networks is not necessary to learn about graph structure.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 125, |
|
"text": "Fig. 3b", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learned graph structure", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We presented Graformer, a novel encoder-decoder architecture for graph-to-text generation based on Transformer. The Graformer encoder uses a novel type of self-attention for graphs based on shortest path lengths between nodes, allowing it to detect global patterns by automatically learning appropriate weights for higher-order neighborhoods. In our experiments on two popular benchmarks for text generation from knowledge graphs, Graformer achieved competitive results while using many fewer parameters than alternative models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "For AGENDA and WebNLG, a minimum and maximum decoding length were set according to the shortest and longest target text in the train set. Table 6 lists the hyperparameters used to obtain final results on both datasets. Input dropout is applied on the word embeddings directly after lookup for node labels and target text tokens before they are fed into encoder or decoder. Attention dropout is applied to all attention weights computed during multi-head (self-)attention. For hyperparameter optimization, we only train for the first 10 (AGENDA) or 50 (WebNLG) epochs to save time. We use a combination of manual tuning and a limited number of randomly sampled runs. For the latter we apply Optuna with default parameters (Akiba et al., 2019; Bergstra et al., 2011) and median pruning, i.e., after each epoch we check if the best performance so far is worse than the median performance of previous runs at the same epoch and if so, abort. For hyperparameter tuning, we decode greedily and measure performance in corpus-level BLEU (Papineni et al., 2002) . Table 7 shows three example generations from our Graformer model and the CGE-LW system by Ribeiro et al. (2020) . Often CGE-LW generations have a high surface overlap with the reference text while Graformer texts fluently express the same content.", |
|
"cite_spans": [ |
|
{ |
|
"start": 721, |
|
"end": 741, |
|
"text": "(Akiba et al., 2019;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 742, |
|
"end": 764, |
|
"text": "Bergstra et al., 2011)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1029, |
|
"end": 1052, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1145, |
|
"end": 1166, |
|
"text": "Ribeiro et al. (2020)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1055, |
|
"end": 1062, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Hyperparameter details", |
|
"sec_num": null |
|
}, |
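A minimal sketch of the Optuna setup with per-epoch median pruning described above; the objective, the search space, and the fake scoring helper are placeholders, not the actual tuning script (the real hyperparameters are listed in Table 6).

```python
import optuna

def train_one_epoch_and_eval(lr, dropout, epoch):
    # stand-in for one epoch of training plus greedy-decoding BLEU;
    # returns a synthetic score so that the sketch runs end to end
    return 30.0 - 100.0 * abs(lr - 1e-3) - 5.0 * abs(dropout - 0.3) + 0.1 * epoch

def objective(trial):
    # placeholder search space
    lr = trial.suggest_float("learning_rate", 1e-4, 1e-2, log=True)
    dropout = trial.suggest_float("dropout", 0.1, 0.5)
    best_bleu = 0.0
    for epoch in range(10):                  # e.g. only the first 10 epochs for AGENDA
        bleu = train_one_epoch_and_eval(lr, dropout, epoch)
        best_bleu = max(best_bleu, bleu)
        trial.report(best_bleu, step=epoch)
        if trial.should_prune():             # median pruning after each epoch
            raise optuna.TrialPruned()
    return best_bleu

if __name__ == "__main__":
    study = optuna.create_study(direction="maximize",
                                pruner=optuna.pruners.MedianPruner())
    study.optimize(objective, n_trials=10)
    print(study.best_params)
```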
|
{ |
|
"text": "Ref.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Qualitative examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "julia morgan has designed many significant buildings , including the los angeles herald examiner building . CGE-LW julia morgan has designed many significant buildings including the los angeles herald examiner building . Ours one of the significant buildings designed by julia morgan is the los angeles herald examiner building .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Qualitative examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Ref.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Qualitative examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "asam pedas is a dish of fish cooked in a sour and hot sauce that comes from indonesia . CGE-LW the main ingredients of asam pedas are fish cooked in a sour and hot sauce and comes from indonesia . Ours the main ingredients of asam pedas are fish cooked in sour and hot sauce . the dish comes from indonesia .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Qualitative examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "banana is an ingredient in binignit which is a dessert . a cookie is also a dessert . CGE-LW banana is an ingredient in binignit , a cookie is also a dessert . Ours a cookie is a dessert , as is binignit , which contains banana as one of its ingredients . Table 7 : Example references and texts generated by CGE-LW (Ribeiro et al., 2020) and Graformer (marked Ours) for samples from the WebNLG test set. In case of multiple references, only one is shown for brevity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 337, |
|
"text": "CGE-LW (Ribeiro et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 256, |
|
"end": 263, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ref.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our code is publicly available: https://github. com/mnschmit/graformer", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported by the BMBF (first author) as part of the project MLWin (01IS18050), by the German Research Foundation (second author) as part of the Research Training Group \"Adaptive Preparation of Information from Heterogeneous Sources\" (AIPHES) under the grant No. GRK 1994/1, and by the Bavarian research institute for digital transformation (bidt) through their fellowship program (third author). We also gratefully acknowledge a Ph.D. scholarship awarded to the first author by the German Academic Scholarship Foundation (Studienstiftung des deutschen Volkes).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Optuna: A nextgeneration hyperparameter optimization framework", |
|
"authors": [ |
|
{ |
|
"first": "Takuya", |
|
"middle": [], |
|
"last": "Akiba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shotaro", |
|
"middle": [], |
|
"last": "Sano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toshihiko", |
|
"middle": [], |
|
"last": "Yanase", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takeru", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masanori", |
|
"middle": [], |
|
"last": "Koyama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 25rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takuya Akiba, Shotaro Sano, Toshihiko Yanase, Takeru Ohta, and Masanori Koyama. 2019. Optuna: A next- generation hyperparameter optimization framework. In Proceedings of the 25rd ACM SIGKDD Interna- tional Conference on Knowledge Discovery and Data Mining.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Construction of the literature graph in semantic scholar", |
|
"authors": [ |
|
{ |
|
"first": "Waleed", |
|
"middle": [], |
|
"last": "Ammar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Groeneveld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandra", |
|
"middle": [], |
|
"last": "Bhagavatula", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Crawford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Dunkelberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Elgohary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Feldman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vu", |
|
"middle": [], |
|
"last": "Ha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodney", |
|
"middle": [], |
|
"last": "Kinney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Kohlmeier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tyler", |
|
"middle": [], |
|
"last": "Murray", |
|
"suffix": "" |
|
}, |
|
{

"first": "Hsu-Han",

"middle": [],

"last": "Ooi",

"suffix": ""

},

{

"first": "Matthew",

"middle": [],

"last": "Peters",

"suffix": ""

},

{

"first": "Joanna",

"middle": [],

"last": "Power",

"suffix": ""

},

{

"first": "Sam",

"middle": [],

"last": "Skjonsberg",

"suffix": ""

},

{

"first": "Lucy",

"middle": [],

"last": "Wang",

"suffix": ""

},

{

"first": "Chris",

"middle": [],

"last": "Wilhelm",

"suffix": ""

},

{

"first": "Zheng",

"middle": [],

"last": "Yuan",

"suffix": ""

},

{

"first": "Madeleine",

"middle": [],

"last": "van Zuylen",

"suffix": ""

},

{

"first": "Oren",

"middle": [],

"last": "Etzioni",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "84--91", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-3011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Waleed Ammar, Dirk Groeneveld, Chandra Bhagavat- ula, Iz Beltagy, Miles Crawford, Doug Downey, Ja- son Dunkelberger, Ahmed Elgohary, Sergey Feld- man, Vu Ha, Rodney Kinney, Sebastian Kohlmeier, Kyle Lo, Tyler Murray, Hsu-Han Ooi, Matthew Pe- ters, Joanna Power, Sam Skjonsberg, Lucy Wang, Chris Wilhelm, Zheng Yuan, Madeleine van Zuylen, and Oren Etzioni. 2018. Construction of the litera- ture graph in semantic scholar. In Proceedings of the 2018 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 3 (Indus- try Papers), pages 84-91, New Orleans -Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Repulsive bayesian sampling for diversified attention modeling", |
|
"authors": [ |
|
{

"first": "Bang",

"middle": [],

"last": "An",

"suffix": ""

},

{

"first": "Xuannan",

"middle": [],

"last": "Dong",

"suffix": ""

},

{

"first": "Changyou",

"middle": [],

"last": "Chen",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bang An, Xuannan Dong, and Changyou Chen. 2019. Repulsive bayesian sampling for diversified attention modeling. 4th workshop on Bayesian Deep Learning (NeurIPS 2019).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "DBpedia: A nucleus for a web of open data", |
|
"authors": [ |
|
{ |
|
"first": "S\u00f6ren", |
|
"middle": [], |
|
"last": "Auer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Bizer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Kobilarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Cyganiak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Ives", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 6th International Semantic Web Conference (ISWC)", |
|
"volume": "4825", |
|
"issue": "", |
|
"pages": "722--735", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-540-76298-0_52" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S\u00f6ren Auer, Chris Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. 2008. DBpedia: A nucleus for a web of open data. In Pro- ceedings of the 6th International Semantic Web Con- ference (ISWC), volume 4825 of Lecture Notes in Computer Science, pages 722-735. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "METEOR: An automatic metric for MT evaluation with improved correlation with human judgments", |
|
"authors": [ |
|
{ |
|
"first": "Satanjeev", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An automatic metric for MT evaluation with im- proved correlation with human judgments. In Pro- ceedings of the ACL Workshop on Intrinsic and Ex- trinsic Evaluation Measures for Machine Transla- tion and/or Summarization, pages 65-72, Ann Arbor, Michigan. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Algorithms for hyper-parameter optimization", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Bergstra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Bardenet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bal\u00e1zs", |
|
"middle": [], |
|
"last": "K\u00e9gl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "24", |
|
"issue": "", |
|
"pages": "2546--2554", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James S. Bergstra, R\u00e9mi Bardenet, Yoshua Bengio, and Bal\u00e1zs K\u00e9gl. 2011. Algorithms for hyper-parameter optimization. In J. Shawe-Taylor, R. S. Zemel, P. L. Bartlett, F. Pereira, and K. Q. Weinberger, editors, Advances in Neural Information Processing Systems 24, pages 2546-2554. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Generating fine-grained open vocabulary entity type descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Rajarshi", |
|
"middle": [], |
|
"last": "Bhowmik", |
|
"suffix": "" |
|
}, |
|
{

"first": "Gerard",

"middle": [],

"last": "de Melo",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "877--888", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1081" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rajarshi Bhowmik and Gerard de Melo. 2018. Generat- ing fine-grained open vocabulary entity type descrip- tions. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 877-888, Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Graph transformer for graph-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Deng", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deng Cai and Wai Lam. 2020. Graph transformer for graph-to-sequence learning. AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Neural data-to-text generation: A comparison between pipeline and end-to-end architectures", |
|
"authors": [ |
|
{

"first": "Thiago",

"middle": [],

"last": "Castro Ferreira",

"suffix": ""

},

{

"first": "Chris",

"middle": [],

"last": "van der Lee",

"suffix": ""

},

{

"first": "Emiel",

"middle": [],

"last": "van Miltenburg",

"suffix": ""

},

{

"first": "Emiel",

"middle": [],

"last": "Krahmer",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "552--562", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1052" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thiago Castro Ferreira, Chris van der Lee, Emiel van Miltenburg, and Emiel Krahmer. 2019. Neu- ral data-to-text generation: A comparison between pipeline and end-to-end architectures. In Proceed- ings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Inter- national Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 552-562, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The best of both worlds: Combining recent advances in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mia", |
|
"middle": [], |
|
"last": "Xu Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Bapna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melvin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Macduff", |
|
"middle": [], |
|
"last": "Hughes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "76--86", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1008" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mia Xu Chen, Orhan Firat, Ankur Bapna, Melvin John- son, Wolfgang Macherey, George Foster, Llion Jones, Mike Schuster, Noam Shazeer, Niki Parmar, Ashish Vaswani, Jakob Uszkoreit, Lukasz Kaiser, Zhifeng Chen, Yonghui Wu, and Macduff Hughes. 2018. The best of both worlds: Combining recent advances in neural machine translation. In Proceedings of the 56th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 76-86, Melbourne, Australia. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "The WebNLG challenge: Generating text from RDF data", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anastasia", |
|
"middle": [], |
|
"last": "Shimorina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Perez-Beltrachini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 10th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "124--133", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-3518" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claire Gardent, Anastasia Shimorina, Shashi Narayan, and Laura Perez-Beltrachini. 2017. The WebNLG challenge: Generating text from RDF data. In Pro- ceedings of the 10th International Conference on Natural Language Generation, pages 124-133, San- tiago de Compostela, Spain. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Densely connected graph convolutional networks for graph-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Zhijiang", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyang", |
|
"middle": [], |
|
"last": "Teng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "297--312", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00269" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhijiang Guo, Yan Zhang, Zhiyang Teng, and Wei Lu. 2019. Densely connected graph convolutional net- works for graph-to-sequence learning. Transactions of the Association for Computational Linguistics, 7:297-312.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Gaussian error linear units (gelus)", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Hendrycks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Computing Research Repository", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.08415" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Hendrycks and Kevin Gimpel. 2016. Gaussian er- ror linear units (gelus). Computing Research Reposi- tory, arXiv:1606.08415.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "1735--80", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/neco.1997.9.8.1735" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9:1735- 80.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Semisupervised classification with graph convolutional networks", |
|
"authors": [ |
|
{

"first": "Thomas",

"middle": [

"N"

],

"last": "Kipf",

"suffix": ""

},

{

"first": "Max",

"middle": [],

"last": "Welling",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas N. Kipf and Max Welling. 2017. Semi- supervised classification with graph convolutional networks. In International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Text Generation from Knowledge Graphs with Graph Transformers", |
|
"authors": [ |
|
{ |
|
"first": "Rik", |
|
"middle": [], |
|
"last": "Koncel-Kedziorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhanush", |
|
"middle": [], |
|
"last": "Bekal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2284--2293", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1238" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rik Koncel-Kedziorski, Dhanush Bekal, Yi Luan, Mirella Lapata, and Hannaneh Hajishirzi. 2019. Text Generation from Knowledge Graphs with Graph Transformers. In Proceedings of the 2019 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 2284-2293, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Visual genome: Connecting language and vision using crowdsourced dense image annotations", |
|
"authors": [ |
|
{ |
|
"first": "Ranjay", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuke", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Groth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Hata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Kravitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannis", |
|
"middle": [], |
|
"last": "Kalantidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li-Jia", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Shamma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Bernstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei-Fei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Journal of Computer Vision", |
|
"volume": "123", |
|
"issue": "", |
|
"pages": "32--73", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s11263-016-0981-7" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin John- son, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Fei-Fei Li. 2017. Vi- sual genome: Connecting language and vision us- ing crowdsourced dense image annotations. Interna- tional Journal of Computer Vision, 123:32-73.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "SentencePiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "66--71", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-2012" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. SentencePiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Deeper insights into graph convolutional networks for semi-supervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Qimai", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhichao", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiao", |
|
"middle": [ |
|
"Ming" |
|
], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qimai Li, Zhichao Han, and Xiao ming Wu. 2018. Deeper insights into graph convolutional networks for semi-supervised learning. AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Multi-task identification of entities, relations, and coreference for scientific knowledge graph construction", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3219--3232", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1360" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Luan, Luheng He, Mari Ostendorf, and Hannaneh Hajishirzi. 2018. Multi-task identification of entities, relations, and coreference for scientific knowledge graph construction. In Proceedings of the 2018 Con- ference on Empirical Methods in Natural Language Processing, pages 3219-3232, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Deep graph convolutional encoders for structured data to text generation", |
|
"authors": [ |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Marcheggiani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Perez-Beltrachini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 11th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6501" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diego Marcheggiani and Laura Perez-Beltrachini. 2018. Deep graph convolutional encoders for structured data to text generation. In Proceedings of the 11th International Conference on Natural Language Gen- eration, pages 1-9, Tilburg University, The Nether- lands. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Step-by-step: Separating planning from realization in neural data-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Moryossef", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2267--2277", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1236" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amit Moryossef, Yoav Goldberg, and Ido Dagan. 2019. Step-by-step: Separating planning from realization in neural data-to-text generation. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2267-2277, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic evalu- ation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Compu- tational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Geom-gcn: Geometric graph convolutional networks", |
|
"authors": [ |
|
{ |
|
"first": "Hongbin", |
|
"middle": [], |
|
"last": "Pei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bingzhe", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{

"first": "Kevin",

"middle": [

"Chen-Chuan"

],

"last": "Chang",

"suffix": ""

},

{

"first": "Yu",

"middle": [],

"last": "Lei",

"suffix": ""

},

{

"first": "Bo",

"middle": [],

"last": "Yang",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hongbin Pei, Bingzhe Wei, Kevin Chen-Chuan Chang, Yu Lei, and Bo Yang. 2020. Geom-gcn: Geomet- ric graph convolutional networks. In International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Competence-based curriculum learning for neural machine translation", |
|
"authors": [ |
|
{

"first": "Emmanouil",

"middle": [

"Antonios"

],

"last": "Platanios",

"suffix": ""

},

{

"first": "Otilia",

"middle": [],

"last": "Stretcu",

"suffix": ""

},

{

"first": "Graham",

"middle": [],

"last": "Neubig",

"suffix": ""

},

{

"first": "Barnabas",

"middle": [],

"last": "Poczos",

"suffix": ""

},

{

"first": "Tom",

"middle": [],

"last": "Mitchell",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1162--1172", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1119" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emmanouil Antonios Platanios, Otilia Stretcu, Graham Neubig, Barnabas Poczos, and Tom Mitchell. 2019. Competence-based curriculum learning for neural machine translation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1162-1172, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "chrF++: words helping character n-grams", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovi\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Second Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "612--618", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-4770" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovi\u0107. 2017. chrF++: words helping charac- ter n-grams. In Proceedings of the Second Confer- ence on Machine Translation, pages 612-618, Copen- hagen, Denmark. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Exploring the limits of transfer learning with a unified text-totext transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Computing Research Repository", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.10683" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Kather- ine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2019. Exploring the limits of transfer learning with a unified text-to- text transformer. Computing Research Repository, arXiv:1910.10683.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Enhancing AMR-to-text generation with dual graph representations", |
|
"authors": [ |
|
{

"first": "Leonardo",

"middle": [

"F",

"R"

],

"last": "Ribeiro",

"suffix": ""

},

{

"first": "Claire",

"middle": [],

"last": "Gardent",

"suffix": ""

},

{

"first": "Iryna",

"middle": [],

"last": "Gurevych",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3183--3194", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1314" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo F. R. Ribeiro, Claire Gardent, and Iryna Gurevych. 2019. Enhancing AMR-to-text genera- tion with dual graph representations. In Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3183-3194, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Modeling global and local node contexts for text generation from knowledge graphs", |
|
"authors": [ |
|
{

"first": "Leonardo",

"middle": [

"F",

"R"

],

"last": "Ribeiro",

"suffix": ""

},

{

"first": "Yue",

"middle": [],

"last": "Zhang",

"suffix": ""

},

{

"first": "Claire",

"middle": [],

"last": "Gardent",

"suffix": ""

},

{

"first": "Iryna",

"middle": [],

"last": "Gurevych",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "0", |
|
"pages": "589--604", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo F. R. Ribeiro, Yue Zhang, Claire Gardent, and Iryna Gurevych. 2020. Modeling global and local node contexts for text generation from knowledge graphs. Transactions of the Association for Compu- tational Linguistics, 8(0):589-604.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "An unsupervised joint system for text generation from knowledge graphs and semantic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Schmitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sahand", |
|
"middle": [], |
|
"last": "Sharifzadeh", |
|
"suffix": "" |
|
}, |
|
{

"first": "Volker",

"middle": [],

"last": "Tresp",

"suffix": ""

},

{

"first": "Hinrich",

"middle": [],

"last": "Sch\u00fctze",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7117--7130", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.577" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Schmitt, Sahand Sharifzadeh, Volker Tresp, and Hinrich Sch\u00fctze. 2020. An unsupervised joint sys- tem for text generation from knowledge graphs and semantic parsing. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 7117-7130, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Self-attention with relative position representations", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Shaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "464--468", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2074" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Shaw, Jakob Uszkoreit, and Ashish Vaswani. 2018. Self-attention with relative position representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 464-468, New Or- leans, Louisiana. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Adafactor: Adaptive learning rates with sublinear memory cost", |
|
"authors": [ |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Stern", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 35th International Conference on Machine Learning", |
|
"volume": "80", |
|
"issue": "", |
|
"pages": "4596--4604", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Noam Shazeer and Mitchell Stern. 2018. Adafactor: Adaptive learning rates with sublinear memory cost. In Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pages 4596-4604. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "GTR-LSTM: A triple encoder for sentence generation from RDF data", |
|
"authors": [ |
|
{

"first": "Bayu",

"middle": [

"Distiawan"

],

"last": "Trisedya",

"suffix": ""

},

{

"first": "Jianzhong",

"middle": [],

"last": "Qi",

"suffix": ""

},

{

"first": "Rui",

"middle": [],

"last": "Zhang",

"suffix": ""

},

{

"first": "Wei",

"middle": [],

"last": "Wang",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1627--1637", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1151" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bayu Distiawan Trisedya, Jianzhong Qi, Rui Zhang, and Wei Wang. 2018. GTR-LSTM: A triple encoder for sentence generation from RDF data. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1627-1637, Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, page 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Graph Attention Networks", |
|
"authors": [ |
|
{ |
|
"first": "Petar", |
|
"middle": [], |
|
"last": "Veli\u010dkovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillem", |
|
"middle": [], |
|
"last": "Cucurull", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arantxa", |
|
"middle": [], |
|
"last": "Casanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adriana", |
|
"middle": [], |
|
"last": "Romero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Li\u00f2", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Li\u00f2, and Yoshua Bengio. 2018. Graph Attention Networks. In International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "DrugBank 5.0: a major update to the DrugBank database", |
|
"authors": [ |
|
{

"first": "David",

"middle": [

"S"

],

"last": "Wishart",

"suffix": ""

},

{

"first": "Yannick",

"middle": [

"D"

],

"last": "Feunang",

"suffix": ""

},

{

"first": "An",

"middle": [

"C"

],

"last": "Guo",

"suffix": ""

},

{

"first": "Elvis",

"middle": [

"J"

],

"last": "Lo",

"suffix": ""

},

{

"first": "Ana",

"middle": [],

"last": "Marcu",

"suffix": ""

},

{

"first": "Jason",

"middle": [

"R"

],

"last": "Grant",

"suffix": ""

},

{

"first": "Tanvir",

"middle": [],

"last": "Sajed",

"suffix": ""

},

{

"first": "Daniel",

"middle": [],

"last": "Johnson",

"suffix": ""

},

{

"first": "Carin",

"middle": [],

"last": "Li",

"suffix": ""

},

{

"first": "Zinat",

"middle": [],

"last": "Sayeeda",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Nucleic Acids Research", |
|
"volume": "46", |
|
"issue": "D1", |
|
"pages": "1074--1082", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David S Wishart, Yannick D Feunang, An C Guo, Elvis J Lo, Ana Marcu, Jason R Grant, Tanvir Sajed, Daniel Johnson, Carin Li, Zinat Sayeeda, Nazanin Assem- pour, Ithayavani Iynkkaran, Yifeng Liu, Adam Ma- ciejewski, Nicola Gale, Alex Wilson, Lucy Chin, Ryan Cummings, Diana Le, Allison Pon, Craig Knox, and Michael Wilson. 2018. DrugBank 5.0: a major update to the DrugBank database for 2018. Nucleic Acids Research, 46(D1):D1074-D1082.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Norouzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [], |
|
"last": "Krikun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qin", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Klingner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Apurva", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melvin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaobing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Gouws", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshikiyo", |
|
"middle": [], |
|
"last": "Kato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideto", |
|
"middle": [], |
|
"last": "Kazawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Stevens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Kurian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nishant", |
|
"middle": [], |
|
"last": "Patil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Oriol Vinyals", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1609.08144" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, \u0141ukasz Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2016. Google's neural machine translation sys- tem: Bridging the gap between human and ma- chine translation. Computing Research Repository, arXiv:1609.08144.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Adaptive structural fingerprints for graph attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaokang", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai Zhang, Yaokang Zhu, Jun Wang, and Jie Zhang. 2020. Adaptive structural fingerprints for graph at- tention networks. In International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Modeling graph structure in transformer for better AMR-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junhui", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhua", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Longhua", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5459--5468", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1548" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Zhu, Junhui Li, Muhua Zhu, Longhua Qian, Min Zhang, and Guodong Zhou. 2019. Modeling graph structure in transformer for better AMR-to-text gen- eration. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 5459-5468, Hong Kong, China. Association for Com- putational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "(c) Incidence graph with SAMEp edges (dashed green) Different representations of the same KG (types are omitted for clarity)." |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "R matrix for the graph inFig. 1c (\u03b4 max = 3)." |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Attention bias \u03b3 learned by Graformer on the two datasets. SAME p edges are omitted." |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Statistics of AGENDA and the dataset from the WebNLG challenge as used in our experiments. Upper part: data splits and original KGs. Lower part: token graphs and BPE settings.", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"content": "<table><tr><td/><td>BLEU</td><td>METEOR CHRF++</td><td>#P</td></tr><tr><td>Ours</td><td>17.80</td><td/></tr><tr><td/><td/><td/><td>). Compared to the very</td></tr></table>", |
|
"text": "\u00b10.31 22.07 \u00b10.23 45.43 \u00b10.39 36.3 GT 14.30 \u00b11.01 18.80 \u00b10.28 --GT+RBS 15.1 \u00b10.97 19.5 \u00b10.29 --CGE-LW 18.01 \u00b10.14 22.34 \u00b10.07 46.69 \u00b10.17 69.8", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"5\">: Experimental results on AGENDA. GT (Graph</td></tr><tr><td colspan=\"5\">Transformer) from (Koncel-Kedziorski et al., 2019);</td></tr><tr><td colspan=\"5\">GT+RBS from (An et al., 2019); CGE-LW from</td></tr><tr><td colspan=\"5\">(Ribeiro et al., 2020). Number of parameters in mil-</td></tr><tr><td>lions.</td><td/><td/><td/><td/></tr><tr><td/><td>BLEU</td><td colspan=\"3\">METEOR CHRF++ #P</td></tr><tr><td>Ours</td><td colspan=\"4\">61.15 \u00b10.22 43.38 \u00b10.17 75.43 \u00b10.19 5.3</td></tr><tr><td colspan=\"2\">UPF-FORGe 40.88</td><td>40.00</td><td>-</td><td>-</td></tr><tr><td colspan=\"2\">Melbourne 54.52</td><td>41.00</td><td>70.72</td><td>-</td></tr><tr><td>Adapt</td><td>60.59</td><td>44.00</td><td>76.01</td><td>-</td></tr><tr><td colspan=\"2\">Graph Conv. 55.90</td><td>39.00</td><td>-</td><td>4.9</td></tr><tr><td colspan=\"2\">GTR-LSTM 58.60</td><td>40.60</td><td>-</td><td>-</td></tr><tr><td>E2E GRU</td><td>57.20</td><td>41.00</td><td>-</td><td>-</td></tr><tr><td>CGE-LW-</td><td/><td/><td/><td/></tr></table>", |
|
"text": "LG 63.69 \u00b10.10 44.47 \u00b10.12 76.66 \u00b10.10 10.4", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"content": "<table><tr><td>: Experimental results on the WebNLG test set</td></tr><tr><td>with seen categories. CGE-LW-LG from (Ribeiro et al.,</td></tr><tr><td>2020); Adapt, Melbourne and UPF-FORGe from (Gar-</td></tr><tr><td>dent et al., 2017); Graph Conv. from (Marcheggiani and</td></tr><tr><td>Perez-Beltrachini, 2018); GTR-LSTM from (Trisedya</td></tr><tr><td>et al., 2018); E2E GRU from (Castro Ferreira et al.,</td></tr><tr><td>2019). Number of parameters in millions.</td></tr></table>", |
|
"text": "", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Performance of a single run on the test split of AGENDA w.r.t. different input graph properties. The number of data points in each split is indicated in parentheses.", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Ablation study for a single run on the test portion of AGENDA.", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF10": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Hyperparameters used to obtain final experimental results on WebNLG and AGENDA.", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |