|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:47:26.680964Z" |
|
}, |
|
"title": "Knowledge and Keywords Augmented Abstractive Sentence Summarization", |
|
"authors": [ |
|
{ |
|
"first": "Shuo", |
|
"middle": [], |
|
"last": "Guan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "New York University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ping", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tongji University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zhihua", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tongji University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper, we study the knowledge-based abstractive sentence summarization. There are two essential information features that can influence the quality of news summarization, which are topic keywords and the knowledge structure of the news text. Besides, the existing knowledge-augmented methods have poor performance on sentence summarization since the sparse knowledge structure problem. Considering these, we propose KAS, a novel Knowledge and Keywords Augmented Abstractive Sentence Summarization framework. Tri-encoders are utilized to integrate contexts of original text, knowledge structure and keywords topic simultaneously, with a special linearized knowledge structure. Automatic and human evaluations demonstrate that KAS achieves the best performances. 1", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper, we study the knowledge-based abstractive sentence summarization. There are two essential information features that can influence the quality of news summarization, which are topic keywords and the knowledge structure of the news text. Besides, the existing knowledge-augmented methods have poor performance on sentence summarization since the sparse knowledge structure problem. Considering these, we propose KAS, a novel Knowledge and Keywords Augmented Abstractive Sentence Summarization framework. Tri-encoders are utilized to integrate contexts of original text, knowledge structure and keywords topic simultaneously, with a special linearized knowledge structure. Automatic and human evaluations demonstrate that KAS achieves the best performances. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "With the increasing of computing power and model capacity, it is possible to generate mostly grammatical summarization of natural language text. In general, there are two essential information features of summarization: (1) topic keywords in text (2) the knowledge structure of the text. These features can basically cover all the information in summary generation, especially in sentence or short text summarization. Therefore, considering this reason, we are building a neural network model that integrates both topic keyword context and knowledge structure context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Knowledge augmented summarization has been intensively studied recently, most of which are about document summarization. However, there is not much research on knowledge-based sentence summarization. The main reason is that the existing methods are not applicable to sentence summarization. The knowledge-based summarization frameworks usually use GNN (Graph 1 Code is in https://github.com/SeanG-325/KAS Neural Networks) as the knowledge structure encoder. GNN is designated for encoding graph structure, and it has gained good performances on integrating knowledge graph contexts for document and multi-paragraph summarization. However, the knowledge graph of sentence is usually sparse, and GNN has poor performance in sparse knowledge structure. Specifically, GNNs may cause over-smoothing problem when training on the sparse graphs (Alon and Yahav, 2021) , especially for GCNs (Graph Convolutional Networks) (Kipf and Welling, 2017), decreasing the robustness and performance of the model. Therefore, we are creating a new knowledge-augmented sentence summarization model considering these problems. Besides, considering most of the knowledge based summarization models are only applicable to English, we are aiming at making our model applicable to multiple languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 359, |
|
"end": 360, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 837, |
|
"end": 859, |
|
"text": "(Alon and Yahav, 2021)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to address these issues, we propose a special linearized knowledge sequence structure that are applicable to sentence summarization. Correspondingly, we propose a novel triencoder framework KAS integrating three separate encoders, considering contexts of original text, topic keywords and knowledge structure simultaneously based on their salience. Evaluations demonstrate that KAS framework and the corresponding linearized knowledge structure enhances the performances significantly. Besides, the structure of KAS can be applied to summarization on multiple languages. We have conducted experiments on English and Chinese corpus and achieved best performances on both.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Knowledge-based Summarization The existing method for utilizing knowledge graph into text generation and summarization is adding a separate encoder to encode the vectorized knowledge graph for context integration. Ribeiro et al. (2020) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 235, |
|
"text": "Ribeiro et al. (2020)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "L = {l 1 , ..., l m } S KG = DFS KGL (G k , L) return S KG", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "troduced a knowledge graph encoding strategy for graph-to-text generation model. Koncel-Kedziorski et al. 2019, Huang et al. (2020) proposed a text generation (summarization) model integrated with a GNN encoder (Veli\u010dkovi\u0107 et al., 2018) using encoded graph data preprocessed from the input text. Aiming at solving the possible sparse problem of graphs, Konstas et al. (2017) and Fan et al. (2019) proposed methods of graph linearization, and used LSTM/transformer encoders to encode the graph structure.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 131, |
|
"text": "Huang et al. (2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 236, |
|
"text": "(Veli\u010dkovi\u0107 et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 374, |
|
"text": "Konstas et al. (2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 396, |
|
"text": "Fan et al. (2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The Pointer Mechanism Pointer Mechanism has drawn much attention in text generation (Miao and Blunsom, 2016; Gulcehre et al., 2016; Eric and Manning, 2017) . In text summarization, Pointer-Generator Network model (See et al., 2017) is proposed to keep the generation ability while using pointer mechanism. proposed a method for using pointer mechanism with multiple separate encoders. The idea of Pointer Mechanism is setting soft or hard gates to select from predefined vocabulary or input sequences to generate tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 108, |
|
"text": "(Miao and Blunsom, 2016;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 131, |
|
"text": "Gulcehre et al., 2016;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 155, |
|
"text": "Eric and Manning, 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 231, |
|
"text": "(See et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The whole linearized knowledge graph constructing process is presented in Algorithm 1. For E, we use OLLIE (Mausam et al., 2012) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 128, |
|
"text": "(Mausam et al., 2012)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge and Keywords Construction", |
|
"sec_num": "3.1" |
|
}, |
|
|
{ |
|
"text": "RoBERTa Embedding Technology Platform (Che et al., 2010) . As shown in the algorithm, fact triples in different granularity will be re-extracted until the granularity of the entities and relationships (i.e. remove all attributes and duplicate nouns redundancy) all triples is minimized, and we keep reconstructing the triples to enhance the connectivity of the knowledge graph. We assume each triple e has 3 elements: E 1 , R and E 2 . The E in e.E denotes E 1 and E 2 , which is {E 1 , E 2 }. Then all edges (relationships) will be converted to vertices.", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 56, |
|
"text": "(Che et al., 2010)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Guterres praised G20", |
|
"sec_num": null |
|
}, |
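
{

"text": "A minimal sketch (not the exact KAS implementation) of how OpenIE-style fact triples can be assembled into a graph in which every relationship also becomes a vertex, as described above; the triple format and helper names are illustrative assumptions.\n\nfrom collections import defaultdict\n\ndef build_knowledge_graph(triples):\n    # triples: list of (E1, R, E2) tuples extracted by OLLIE/LTP\n    adj = defaultdict(list)              # vertex -> outgoing neighbour vertices\n    for i, (e1, rel, e2) in enumerate(triples):\n        r_node = f'{rel}#{i}'            # keep repeated relations distinct\n        adj[e1].append(r_node)           # E1 -> R\n        adj[r_node].append(e2)           # R  -> E2\n    return adj\n\nprint(build_knowledge_graph([('Guterres', 'praised', 'G20')]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Knowledge and Keywords Construction",

"sec_num": null

},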
|
{ |
|
"text": "We then generate the linearized knowledge graph sequence by a modified DFS algorithm. The DFS is modified on the start vertex selection and priorities of different traversal paths. When traversal starts or the current vertex has more than one path, we select the vertex whose token first appear in the source text as the next. It reduces text redundance effectively and makes the framework focus more on the key logic instead of other irrelevant information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Guterres praised G20", |
|
"sec_num": null |
|
}, |
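
{

"text": "A minimal sketch of the modified DFS linearization described above, assuming the adjacency structure from the previous sketch; the tie-breaking helper is an illustrative assumption.\n\ndef first_pos(v, source):\n    # position of the vertex token in the source text; unseen tokens go last\n    token = v.split('#')[0]\n    pos = source.find(token)\n    return pos if pos >= 0 else len(source)\n\ndef linearize(adj, source):\n    seq, visited = [], set()\n    def dfs(v):\n        if v in visited:\n            return\n        visited.add(v)\n        seq.append(v.split('#')[0])\n        # prefer the branch whose token appears earliest in the source text\n        for nxt in sorted(adj.get(v, []), key=lambda u: first_pos(u, source)):\n            dfs(nxt)\n    # the start vertex is also chosen by earliest appearance; restart for disconnected parts\n    for v in sorted(adj, key=lambda u: first_pos(u, source)):\n        dfs(v)\n    return seq",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Knowledge and Keywords Construction",

"sec_num": null

},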
|
{ |
|
"text": "In order to construct keywords topic sequences, we use TextRank (Mihalcea and Tarau, 2004) algorithm to extract keywords from source text, and make them in the order in which they appears in the original text. This brings priori topic knowledge to the model and makes the model explicitly consider the keywords topic information of the text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 90, |
|
"text": "(Mihalcea and Tarau, 2004)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Guterres praised G20", |
|
"sec_num": null |
|
}, |
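
{

"text": "A minimal sketch of constructing the keyword topic sequence: TextRank keywords are extracted and then re-ordered by their first appearance in the source text. jieba.analyse.textrank is used here for Chinese (an equivalent TextRank implementation can be substituted for English); the parameter values are illustrative.\n\nimport jieba.analyse\n\ndef keyword_sequence(source, top_k=10):\n    keywords = jieba.analyse.textrank(source, topK=top_k)    # unordered keywords\n    # keep keywords found in the text, ordered by first occurrence\n    found = [(source.find(k), k) for k in keywords if k in source]\n    return [k for _, k in sorted(found)]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Knowledge and Keywords Construction",

"sec_num": null

},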
|
{ |
|
"text": "KAS takes as input a news text S = {x i }, a keywords topic sequence K = {k i } and a knowledge sequence G = {v i }, and let D = {S, K, G}. The tri-encoder structure shown in Figure 1 integrates the context of original source text, keywords topic and internal knowledge. The RoBERTa (Liu et al., 2019) is utilized for word embedding pre-training, and we use the outputs of the last RoBERTa layer as the input embedding for all encoders. We build encoders to the generate hidden states", |
|
"cite_spans": [ |
|
{ |
|
"start": 283, |
|
"end": 301, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 183, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "h S t , h K t , h G t , which is h x t = g(h x t\u22121 )(x \u2208 D)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": ", in which function g is a bi-directional LSTM. The hidden states in the final time step of the three encoders,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "h S l 1 , h K l 2 , h G l 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": ", should be transformed into the decoder initial state", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "d 0 = tanh(W m \u2022 [h S l 1 ||h K l 2 ||h G l 3 ])", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": ". The attentions of the source text, keywords and knowledge are computed as(\u03b1 S t ), (\u03b1 K t ) and (\u03b1 G t ) (Bahdanau et al., 2015) . The context vectors are computed as c", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 130, |
|
"text": "(Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "x t = t i=0 \u03b1 x i h x i , x \u2208 D.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We decode with an attention-based decoder, the decoder hidden state at timestep", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "t d t is d t = f (d t\u22121 , c S t\u22121 , c K t\u22121 , c G t\u22121 , y t\u22121 ), in which d t\u22121", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "is the decoder hidden state, y t\u22121 is the decoder input, c x t\u22121 are the context vectors. The function f denotes to an unidirectional LSTM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.2" |
|
}, |
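
{

"text": "A minimal TensorFlow/Keras sketch of the tri-encoder and the decoder initial state d_0 = tanh(W_m [h^S_{l_1} || h^K_{l_2} || h^G_{l_3}]), assuming pre-computed RoBERTa embeddings as inputs; layer sizes and names are illustrative, not the exact KAS configuration.\n\nimport tensorflow as tf\n\ndef make_encoder(hidden=512):\n    return tf.keras.layers.Bidirectional(\n        tf.keras.layers.LSTM(hidden, return_sequences=True, return_state=True))\n\nencoders = {x: make_encoder() for x in ('S', 'K', 'G')}\nW_m = tf.keras.layers.Dense(512, activation='tanh', use_bias=False)\n\ndef encode(embeddings):\n    # embeddings: dict over {'S', 'K', 'G'}, each [batch, length, emb_dim] from the last RoBERTa layer\n    outputs, finals = {}, []\n    for x in ('S', 'K', 'G'):\n        out, fwd_h, _, bwd_h, _ = encoders[x](embeddings[x])\n        outputs[x] = out                  # word-level hidden states used by the attention\n        finals += [fwd_h, bwd_h]          # final-step states of the BiLSTM\n    d0 = W_m(tf.concat(finals, axis=-1))  # decoder initial state\n    return outputs, d0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture",

"sec_num": "3.2"

},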
|
{ |
|
"text": "The salience for the three contexts should be automatically adjusted. Therefore, besides the word-level attention in each encoder, we further utilize a encoder-level hierarchical attention mechanism for ensemble context. We compute the ensemble attention as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "b x = u T tanh(W x hc c x t + W x hd d t + b x h ) \u03b2 x = softmax(b x ), x \u2208 D \u03b2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "x is the hierarchical attention weight of the three contexts in the ensemble context. We then compute the ensemble context c * t as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "c * t = x\u2208D \u03b2 x c x t", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The ensemble context c * t is a fixed length vector encoding salient information from the three contexts of the tri-encoder model. P vocab (w) is calculated by scaling [h t ||c * t ] to the vocabulary size and taking a softmax:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "P vocab (w) = softmax(W s [h t ||c * t ] + b s )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To allow W s to reuse the linguistic in input embedding and decrease the number of parameters, we integrate weight-sharing mechanism (Paulus et al., 2018) in the model as W s = tanh(W emb \u2022 W sh ), in which W emb is input embedding matrix.", |
|
"cite_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 154, |
|
"text": "(Paulus et al., 2018)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
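
{

"text": "A minimal sketch of the encoder-level hierarchical attention and the vocabulary distribution described above; tensor shapes, parameter names and the use of the decoder state d_t in place of h_t are illustrative assumptions.\n\nimport tensorflow as tf\n\ndef ensemble_and_vocab(contexts, d_t, params):\n    # contexts: dict over {'S', 'K', 'G'}, each c^x_t with shape [batch, hidden]\n    scores = [tf.reduce_sum(params['u'] * tf.tanh(\n                  contexts[x] @ params['W_hc'][x] + d_t @ params['W_hd'][x] + params['b_h'][x]),\n              axis=-1) for x in ('S', 'K', 'G')]\n    beta = tf.nn.softmax(tf.stack(scores, axis=-1))           # hierarchical weights beta^x, [batch, 3]\n    stacked = tf.stack([contexts[x] for x in ('S', 'K', 'G')], axis=1)\n    c_star = tf.reduce_sum(beta[..., None] * stacked, axis=1)  # ensemble context c*_t\n    p_vocab = tf.nn.softmax(tf.concat([d_t, c_star], axis=-1) @ params['W_s'] + params['b_s'])\n    return beta, c_star, p_vocab",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hierarchical Attention",

"sec_num": null

},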
|
{ |
|
"text": "Tri-Copy Mechanism We compute p cpy , which is overall copy probability and will be distributed to the three encoders:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "p cpy = \u03c3(W cpy [h t ||c * t ] + b cpy )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "P cpy (w) is distributed to the tri-encoders with soft gates \u03bb S , \u03bb K , \u03bb G . Here, \u03bb i (i \u2208 D) automatically adjust d t , y t\u22121 , and the context vector c i t . We define \u03bb i as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u03bb i = \u03c3(w T di d t + w T yi y t\u22121 + w T ci c i t ) x \u03c3(w T dx d t + w T yx y t\u22121 + w T cx c x t ) \u2022 p cpy (i, x \u2208 D)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
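
{

"text": "A minimal sketch of the overall copy probability p_cpy and the soft gates lambda^i that distribute it across the three encoders; parameter shapes (each weight vector maps to one scalar per example) are illustrative assumptions.\n\nimport tensorflow as tf\n\ndef copy_gates(d_t, c_star, y_prev, contexts, params):\n    # contexts: dict over {'S', 'K', 'G'} of c^i_t, each [batch, hidden]\n    p_cpy = tf.sigmoid(tf.concat([d_t, c_star], axis=-1) @ params['W_cpy'] + params['b_cpy'])\n    raw = {i: tf.sigmoid(d_t @ params['w_d'][i] + y_prev @ params['w_y'][i] + contexts[i] @ params['w_c'][i])\n           for i in ('S', 'K', 'G')}\n    total = tf.add_n(list(raw.values()))                       # denominator summed over x in D\n    lambdas = {i: raw[i] / total * p_cpy for i in ('S', 'K', 'G')}\n    return p_cpy, lambdas",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hierarchical Attention",

"sec_num": null

},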
|
{ |
|
"text": "The training loss can be defined as the the negative log likelihood of the target sequence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "L = \u2212 T t=0 log p(y t = w * t |P vocab , S, K, G, y <t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "in which w * t is the target word at step t, T is the length of the target sequence. The multi-copy mechanism can make the model more inclined to generate informative words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hierarchical Attention", |
|
"sec_num": null |
|
}, |
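
{

"text": "A minimal sketch of how the copy probabilities can be combined with P_vocab into the final output distribution; this follows a pointer-generator-style mixture assumed from the description above rather than the verbatim KAS formula.\n\nimport tensorflow as tf\n\ndef final_dist(p_vocab, p_cpy, lambdas, attentions, src_ids, vocab_size):\n    # p_vocab: [batch, vocab]; p_cpy, lambdas[x]: [batch, 1]\n    # attentions[x], src_ids[x]: [batch, len_x] for x in {'S', 'K', 'G'}\n    dist = (1.0 - p_cpy) * p_vocab                             # generation part\n    for x in ('S', 'K', 'G'):\n        copy_mass = lambdas[x] * attentions[x]                 # lambda^x already carries p_cpy\n        one_hot = tf.one_hot(src_ids[x], vocab_size)           # scatter attention onto the vocabulary\n        dist += tf.reduce_sum(one_hot * copy_mass[..., None], axis=1)\n    return dist                                                # training minimizes -log dist[target]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hierarchical Attention",

"sec_num": null

},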
|
{ |
|
"text": "We use LCSTS dataset (Hu et al., 2015) , which contains a training set of 2.4M online Chinese short news texts in Chinese social media SinaWeibo. We choose 725 pairs from the test set with high annotation scores as our test set. Besides, we consider the annotated Gigaword corpus (Rush et al., 2015) , which leads to around 3.8M training samples and 1951 test samples for evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 38, |
|
"text": "(Hu et al., 2015)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 299, |
|
"text": "(Rush et al., 2015)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The model is mainly implemented in Tensorflow 2 . In the data preprocess step, we use Jieba 3 for Chinese words segmentation and topic keywords extraction, and LTP (Che et al., 2010) for knowledge extraction. For English we use OLLIE to extract knowledge triples. For our model, we have 512dimensional hidden states and word embedding. We use a predefined vocabulary of 60k words for (See et al., 2017 ) 38.22 25.80 35.46 GLOBAL(Lin et al., 2018 39.40 26.90 36.50 NCLS (Zhu et al., 2019) 39.71 27.45 37.13 CATT (Duan et al., 2019) 44.35 30.65 40.58 LEXICON(Wan et al., 2020) 42 Table 1 : F 1 scores on the LCSTS dataset in terms of the full-length RG-1, RG-2, and RG-L. Bold means the best. \"+KG\" and \"+Kw\" means the model augmented by knowledge and keywords separately. (Zhou et al., 2017) 36.15 17.54 33.63 GLOBAL (Lin et al., 2018) 36.30 18.00 33.80 GENPARSE (Song et al., 2020 ) 36.61 18.85 34.33 CPDS(Wang et al., 2019 37 both source and target in word-level inputs. Adagrad optimizer is used with learning rate 0.15 and an initial accumulator value of 0.1. All models are trained on a single NVIDIA RTX 2080 Ti GPU, with a batch size of 64 on inputs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 164, |
|
"end": 182, |
|
"text": "(Che et al., 2010)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 384, |
|
"end": 401, |
|
"text": "(See et al., 2017", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 445, |
|
"text": ") 38.22 25.80 35.46 GLOBAL(Lin et al., 2018", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 469, |
|
"end": 487, |
|
"text": "(Zhu et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 511, |
|
"end": 530, |
|
"text": "(Duan et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 549, |
|
"end": 574, |
|
"text": "LEXICON(Wan et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 771, |
|
"end": 790, |
|
"text": "(Zhou et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 816, |
|
"end": 834, |
|
"text": "(Lin et al., 2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 862, |
|
"end": 880, |
|
"text": "(Song et al., 2020", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 881, |
|
"end": 923, |
|
"text": ") 36.61 18.85 34.33 CPDS(Wang et al., 2019", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 578, |
|
"end": 585, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiment Settings", |
|
"sec_num": "4.2" |
|
}, |
|
|
{ |
|
"text": "The model is evaluated with the standard ROUGE metric (Lin, 2004) , shown in Table 1 and 2. We use the F 1 scores for ROUGE-1, ROUGE-2 and ROUGE-L. Besides the automatic evaluation, we further conduct human evaluation for the framework. We randomly sample 100 articles from LCSTS test set and ask 3 Chinese native speakers to rate summaries of our systems and the baseline (PGEN+COV), along with outputs by human-written summaries. After reading the articles, each judge scores summaries on a Likert scale from 1 (worst) to 5 (best) on (1)informativeness and (2)fluency. Besides, in the experiment we noticed that the outputs of KAS are more diversified and attractive to readers, so we test (3)diversity: whether the summary arouses annotators' reading interest. We consider two types of unfaithful errors: (i) hallucination error and (ii) logical error. We ask the annotators to label each Case Study ST:", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 65, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 84, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic and Human Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "ST: Sinopec plans to promote the property right reorganization of downstream oil sales business, which is known as a breakthrough for central enterprises to develop mixed ownership and break monopoly. The private enterprise boss said frankly that if the monopoly of oil sources in gas stations is not released, the significance of transferring part of the space at the equity level is limited: \"it's meaningless to let private capital participate in the shares without solving the problem of oil source. Table 3 : Human evaluation on informativeness (Inf.), fluency (Flu.) and diversity (Div.) (1-to-5), and hallucination error(HE.) and logical error (LE.) (0-to-1). Bold are the bests. : Significantly different from all other models. (p < 0.05)", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 504, |
|
"end": 511, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic and Human Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "type as 1 for existence of errors and 0 otherwise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic and Human Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The automatic evaluation scores show that KAS achieves bests on LCSTS and Gigaword. The Table 3 shows that KAS augmented by both keywords topic and knowledge achieves the best results in all indicators, with significant enhancements. A case study on LCSTS is shown in Figure 2 . The words marked green are the key information of the sentence, and the words marked blue are the diversified and eye-catching words generated by KwKG model, which is augmented by both keywords and knowledge. We can see that the model only augmented by keywords (+Kw) mistakenly ignored some key points in the sentence, while the KwKG model contains all summary points.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 96, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 277, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Further, we find that diversity indicator of summaries is enhanced significantly, which is reflected in Table 3 . Specifically, in the experiment we found the KwKG model may usually generate diversified words, as showed in the figure (marked blue). As LCSTS is a dataset of social media news in an eyecatching style, we speculate while the knowledge structure may enhance the understanding ability of the framework, it can implicitly enhance the memory of the writing styles of the training set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 111, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "In this work, we propose KAS, an abstractive summarization framework augmented by knowledge and topic keywords that supports multiple languages. Experimental results show that KAS generates more qualified summaries and achieves the best performances. In the future, we aim at enhancing attractiveness of sentence summarization based on our structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Here we show the details of the indicators in human evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Details of Human Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Informativeness It is the indicator reflecting whether the generated summary covers all important information points in the input text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Details of Human Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Fluency The indicator reflecting whether the summary is grammatically correct, clear and coherent.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Details of Human Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Diversity The indicator reflecting whether the summary arouses annotators' reading interest(which is a key quality indicator of social media news summaries).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices A Details of Human Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The error for model of generating summaries whose logic structures contradicting with which in the original text (such as summarizing \"A is B's dog\" as \"B is A's dog\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Error", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Hallucination Error The error for model of generating summaries containing the facts that are not in or cannot be inferred from original text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Logical Error", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For details and case study, we randomly pick an example of generated summaries in Figure 2 . The original examples (in Chinese) are shown and all the texts are carefully translated into English for reading convenience. The words marked in green are key information points in original text, and the words marked in blue are diversified phrase. The examples demonstrate that the combination of knowledge graphs and keywords sequence can increase logicality and diversity in Chinese summarization tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 90, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Case Study", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.tensorflow.org/ 3 https://github.com/fxsjy/jieba", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was partially supported by CIMS, and partially supported by National Natural Science Foundation of China (No.61976160).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "ST: Sinopec plans to promote the property right reorganization of downstream oil sales business, which is known as a breakthrough for central enterprises to develop mixed ownership and break monopoly. The private enterprise boss said frankly that if the monopoly of oil sources in gas stations is not released, the significance of transferring part of the space at the equity level is limited: \"it's meaningless to let private capital participate in the shares without solving the problem of oil source.\" Ref:Ref: Private oil enterprises boss: it's meaningless to let private capital participate in shares without solving the problem of oil sources +Kw: +Kw: Private oil enterprise boss: it's meaningless to let private capital participate in shares +KwKG: +KwKG: Private oil enterprise boss bombards Sinopec plan: it's meaningless to let private capital participate in shares without solving the problem of oil sources", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Examples of summary ST:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "ST: The Ministry of education requires schools and kindergartens to formulate specific measures to prevent table waste, and to promote the civilized way of eating with small portions and full meals for many times. Primary and secondary schools around the country also need to carry out social surveys on catering consumption, office paper and household water consumption, and participate in experience activities in social practice bases of grain saving, water saving and environmental protection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ST:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Opinions on extensive and in-depth development of thrift education in primary and secondary school kindergartens +Kw: +Kw: Ministry of Education: schools should formulate specific measures to prevent table waste +KwKG: +KwKG: Requirements of the Ministry of Education: schools guide students to be diligent and thrifty Figure 3 : An example of generated summaries on the LCSTS dataset. ST is source text; Ref is reference summary; +Kw is keywords topic augmented; +KwKG is keywords topic and knowledge augmented.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 319, |
|
"end": 327, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ref: Ref:", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "On the bottleneck of graph neural networks and its practical implications", |
|
"authors": [ |
|
{ |
|
"first": "Uri", |
|
"middle": [], |
|
"last": "Alon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eran", |
|
"middle": [], |
|
"last": "Yahav", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "9th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Uri Alon and Eran Yahav. 2021. On the bottleneck of graph neural networks and its practical implications. In 9th International Conference on Learning Repre- sentations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Internation- al Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Confer- ence Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "LTP: A Chinese language technology platform", |
|
"authors": [ |
|
{ |
|
"first": "Wanxiang", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenghua", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Coling 2010: Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wanxiang Che, Zhenghua Li, and Ting Liu. 2010. LTP: A Chinese language technology platform. In Coling 2010: Demonstrations, pages 13-16, Beijing, China. Coling 2010 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Contrastive attention mechanism for abstractive sentence summarization", |
|
"authors": [ |
|
{ |
|
"first": "Xiangyu", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongfei", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingming", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weihua", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3044--3053", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1301" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiangyu Duan, Hongfei Yu, Mingming Yin, Min Zhang, Weihua Luo, and Yue Zhang. 2019. Con- trastive attention mechanism for abstractive sentence summarization. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 3044-3053, Hong Kong, China. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A copyaugmented sequence-to-sequence architecture gives good performance on task-oriented dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Mihail", |
|
"middle": [], |
|
"last": "Eric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "468--473", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihail Eric and Christopher Manning. 2017. A copy- augmented sequence-to-sequence architecture gives good performance on task-oriented dialogue. In Pro- ceedings of the 15th Conference of the European Chapter of the Association for Computational Lin- guistics: Volume 2, Short Papers, pages 468-473, Valencia, Spain. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Using local knowledge graph construction to scale Seq2Seq models to multidocument inputs", |
|
"authors": [ |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chlo\u00e9", |
|
"middle": [], |
|
"last": "Braud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4186--4196", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1428" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angela Fan, Claire Gardent, Chlo\u00e9 Braud, and An- toine Bordes. 2019. Using local knowledge graph construction to scale Seq2Seq models to multi- document inputs. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 4186-4196, Hong Kong, China. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Pointing the unknown words", |
|
"authors": [ |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungjin", |
|
"middle": [], |
|
"last": "Ahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "140--149", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1014" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Caglar Gulcehre, Sungjin Ahn, Ramesh Nallapati, Bowen Zhou, and Yoshua Bengio. 2016. Pointing the unknown words. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 140- 149, Berlin, Germany. Association for Computation- al Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "LC-STS: A large scale Chinese short text summarization dataset", |
|
"authors": [ |
|
{ |
|
"first": "Baotian", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingcai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fangze", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1967--1972", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1229" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baotian Hu, Qingcai Chen, and Fangze Zhu. 2015. LC- STS: A large scale Chinese short text summarization dataset. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1967-1972, Lisbon, Portugal. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Knowledge graph-augmented abstractive summarization with semantic-driven cloze reward", |
|
"authors": [ |
|
{ |
|
"first": "Luyang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingfei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5094--5107", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.457" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luyang Huang, Lingfei Wu, and Lu Wang. 2020. Knowledge graph-augmented abstractive summa- rization with semantic-driven cloze reward. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5094- 5107, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Semi-Supervised Classification with Graph Convolutional Networks", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Kipf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 5th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas N. Kipf and Max Welling. 2017. Semi- Supervised Classification with Graph Convolutional Networks. In Proceedings of the 5th International Conference on Learning Representations, ICLR '17.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Text Generation from Knowledge Graphs with Graph Transformers", |
|
"authors": [ |
|
{ |
|
"first": "Rik", |
|
"middle": [], |
|
"last": "Koncel-Kedziorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhanush", |
|
"middle": [], |
|
"last": "Bekal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2284--2293", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1238" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rik Koncel-Kedziorski, Dhanush Bekal, Yi Luan, Mirella Lapata, and Hannaneh Hajishirzi. 2019. Text Generation from Knowledge Graphs with Graph Transformers. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2284-2293, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Neural AMR: Sequence-to-sequence models for parsing and generation", |
|
"authors": [ |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Konstas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srinivasan", |
|
"middle": [], |
|
"last": "Iyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "146--157", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1014" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ioannis Konstas, Srinivasan Iyer, Mark Yatskar, Yejin Choi, and Luke Zettlemoyer. 2017. Neural AMR: Sequence-to-sequence models for parsing and gener- ation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 146-157, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "ROUGE: A package for automatic evaluation of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Text Summarization Branches Out", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "74--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004. ROUGE: A package for automat- ic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Global encoding for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Junyang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "163--169", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-2027" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junyang Lin, Xu Sun, Shuming Ma, and Qi Su. 2018. Global encoding for abstractive summarization. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 2: Short Papers), pages 163-169, Melbourne, Australi- a. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Roberta: A robustly optimized bert pretraining approach. arXiv e-prints", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv e-prints, arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Open language learning for information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Mausam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Schmitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Soderland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Bart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "523--534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mausam, Michael Schmitz, Stephen Soderland, Robert Bart, and Oren Etzioni. 2012. Open language learn- ing for information extraction. In Proceedings of the 2012 Joint Conference on Empirical Methods in Nat- ural Language Processing and Computational Natu- ral Language Learning, pages 523-534, Jeju Island, Korea. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Language as a latent variable: Discrete generative models for sentence compression", |
|
"authors": [ |
|
{ |
|
"first": "Yishu", |
|
"middle": [], |
|
"last": "Miao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "319--328", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1031" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yishu Miao and Phil Blunsom. 2016. Language as a latent variable: Discrete generative models for sen- tence compression. In Proceedings of the 2016 Con- ference on Empirical Methods in Natural Language Processing, pages 319-328, Austin, Texas. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "TextRank: Bringing order into text", |
|
"authors": [ |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Tarau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "404--411", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rada Mihalcea and Paul Tarau. 2004. TextRank: Bringing order into text. In Proceedings of the 2004 Conference on Empirical Methods in Natural Lan- guage Processing, pages 404-411, Barcelona, Spain. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Abstractive text summarization using sequence-to-sequence RNNs and beyond", |
|
"authors": [ |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c7aglar", |
|
"middle": [], |
|
"last": "Cicero Dos Santos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Gul\u00e7ehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "280--290", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K16-1028" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramesh Nallapati, Bowen Zhou, Cicero dos Santos, \u00c7aglar Gul\u00e7ehre, and Bing Xiang. 2016. Abstrac- tive text summarization using sequence-to-sequence RNNs and beyond. In Proceedings of The 20th SIGNLL Conference on Computational Natural Lan- guage Learning, pages 280-290, Berlin, Germany. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A deep reinforced model for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Romain", |
|
"middle": [], |
|
"last": "Paulus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Romain Paulus, Caiming Xiong, and Richard Socher. 2018. A deep reinforced model for abstractive sum- marization. In International Conference on Learn- ing Representations.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Modeling global and local node contexts for text generation from knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Leonardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "589--604", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00332" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo F. R. Ribeiro, Yue Zhang, Claire Gardent, and Iryna Gurevych. 2020. Modeling global and local node contexts for text generation from knowl- edge graphs. Transactions of the Association for Computational Linguistics, 8:589-604.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "A neural attention model for abstractive sentence summarization", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "379--389", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1044" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander M. Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sen- tence summarization. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing, pages 379-389, Lisbon, Portugal. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Get to the point: Summarization with pointergenerator networks", |
|
"authors": [ |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1073--1083", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1099" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abigail See, Peter J. Liu, and Christopher D. Manning. 2017. Get to the point: Summarization with pointer- generator networks. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1073- 1083, Vancouver, Canada. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Joint parsing and generation for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Kaiqiang", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Logan", |
|
"middle": [], |
|
"last": "Lebanoff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qipeng", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xipeng", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangyang", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "The Thirty-Second Innovative Applications of Artificial Intelligence Conference", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "8894--8901", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaiqiang Song, Logan Lebanoff, Qipeng Guo, Xipeng Qiu, Xiangyang Xue, Chen Li, Dong Yu, and Fei Liu. 2020. Joint parsing and generation for abstrac- tive summarization. In The Thirty-Fourth AAAI Con- ference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial In- telligence, EAAI 2020, New York, NY, USA, Febru- ary 7-12, 2020, pages 8894-8901. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Multi-source pointer network for product title summarization", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanxiao", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changhua", |
|
"middle": [], |
|
"last": "Pei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenwu", |
|
"middle": [], |
|
"last": "Ou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaobo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th ACM International Conference on Information and Knowledge Management, CIKM '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--16", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3269206.3271722" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Sun, Peng Jiang, Hanxiao Sun, Changhua Pei, Wen- wu Ou, and Xiaobo Wang. 2018. Multi-source point- er network for product title summarization. In Pro- ceedings of the 27th ACM International Conference on Information and Knowledge Management, CIKM '18, pages 7-16, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Graph attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Petar", |
|
"middle": [], |
|
"last": "Veli\u010dkovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillem", |
|
"middle": [], |
|
"last": "Cucurull", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arantxa", |
|
"middle": [], |
|
"last": "Casanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adriana", |
|
"middle": [], |
|
"last": "Romero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Li\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casano- va, Adriana Romero, Pietro Li\u00f3, and Yoshua Bengio. 2018. Graph attention networks. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Lexiconconstrained copying network for chinese abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Boyan", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhuo", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Boyan Wan, Zhuo Tang, and Li Yang. 2020. Lexicon- constrained copying network for chinese abstractive summarization. CoRR, abs/2010.08197.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Concept pointer network for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Wenbo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heyan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuxiang", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3076--3085", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1304" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenbo Wang, Yang Gao, Heyan Huang, and Yuxiang Zhou. 2019. Concept pointer network for abstrac- tive summarization. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 3076-3085, Hong Kong, China. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Selective encoding for abstractive sentence summarization", |
|
"authors": [ |
|
{ |
|
"first": "Qingyu", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1095--1104", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qingyu Zhou, Nan Yang, Furu Wei, and Ming Zhou. 2017. Selective encoding for abstractive sentence summarization. In Proceedings of the 55th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 1095-1104, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "NCLS: Neural cross-lingual summarization", |
|
"authors": [ |
|
{ |
|
"first": "Junnan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qian", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yining", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaonan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqing", |
|
"middle": [], |
|
"last": "Zong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3054--3064", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1302" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junnan Zhu, Qian Wang, Yining Wang, Yu Zhou, Ji- ajun Zhang, Shaonan Wang, and Chengqing Zong. 2019. NCLS: Neural cross-lingual summarization. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3054- 3064, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "The Model Structure of KAS. The \u03bb i are soft gates for distributing copy probabilities.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "Private oil enterprises boss: it's meaningless to let private capital participate in shares without solving the problem of oil sources +Kw: +Kw: Private oil enterprise boss: it's meaningless to let private capital participate in shares +KwKG: +KwKG: Private oil enterprise boss bombards Sinopec plan: it's meaningless to let private capital participate in shares without solving the problem of oil sources.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "A case study on the LCSTS dataset. ST is source text; Ref is reference summary; +Kw is keywords augmented; +KwKG is keywords and knowledge augmented.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"text": "", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"text": "to extract triples from English news texts. As few established tools are for open domain Chinese triple extraction, we extract triples from semantic rules using Language", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td/><td/><td>News Text: News Text:</td></tr><tr><td/><td/><td colspan=\"2\">UN secretary-general UN secretary-general</td></tr><tr><td>UN UN</td><td>G20 G20</td><td colspan=\"2\">attends G20 summit saying... attends G20 summit saying...</td></tr><tr><td>... ...</td><td/><td/></tr><tr><td/><td/><td/><td>Linearization</td></tr><tr><td colspan=\"2\">Keywords Topic Encoder</td><td colspan=\"2\">Text Encoder</td><td>Knowledge Encoder</td></tr><tr><td colspan=\"2\">Attention</td><td/></tr><tr><td/><td/><td>Vocab</td><td>Copy</td></tr><tr><td/><td/><td>Softmax</td><td>Mechanism</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"text": "F 1 scores on the Gigaword dataset in terms of the full-length RG-1, RG-2, and RG-L. Bold means the best.", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |