|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:47:20.787108Z" |
|
}, |
|
"title": "Rewards with Negative Examples for Reinforced Topic-Focused Abstractive Summarization", |
|
"authors": [ |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Mrini", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California", |
|
"location": { |
|
"addrLine": "San Diego La Jolla", |
|
"postCode": "92093", |
|
"region": "CA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Can", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Dreyer", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We consider the problem of topic-focused abstractive summarization, where the goal is to generate an abstractive summary focused on a particular topic, a phrase of one or multiple words. We hypothesize that the task of generating topic-focused summaries can be improved by showing the model what it must not focus on. We introduce a deep reinforcement learning approach to topic-focused abstractive summarization, trained on rewards with a novel negative example baseline. We define the input in this problem as the source text preceded by the topic. We adapt the CNN-Daily Mail and New York Times summarization datasets for this task. We then show through experiments on existing rewards that the use of a negative example baseline can outperform the use of a self-critical baseline, in ROUGE, BERTSCORE, and human evaluation metrics.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We consider the problem of topic-focused abstractive summarization, where the goal is to generate an abstractive summary focused on a particular topic, a phrase of one or multiple words. We hypothesize that the task of generating topic-focused summaries can be improved by showing the model what it must not focus on. We introduce a deep reinforcement learning approach to topic-focused abstractive summarization, trained on rewards with a novel negative example baseline. We define the input in this problem as the source text preceded by the topic. We adapt the CNN-Daily Mail and New York Times summarization datasets for this task. We then show through experiments on existing rewards that the use of a negative example baseline can outperform the use of a self-critical baseline, in ROUGE, BERTSCORE, and human evaluation metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Topic-focused summarization is the task of generating a summary given a source text and a specific query or topic. Approaches to topic-focused summarization include query relevance and importance (Gupta et al., 2007) , multi-modality manifold ranking (Wan et al., 2007; Wan, 2008; Wan and Xiao, 2009) , and query attention (Nema et al., 2017) . The DUC 2005 and 2006 datasets (Dang, 2005 (Dang, , 2006 are examples of datasets that are widely used for this task. These datasets are much smaller than benchmark datasets for generic summarization, resulting in fewer research work to train topicfocused summarization on state-of-the-art systems (Deutsch and Roth, 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 216, |
|
"text": "(Gupta et al., 2007)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 269, |
|
"text": "(Wan et al., 2007;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 280, |
|
"text": "Wan, 2008;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 300, |
|
"text": "Wan and Xiao, 2009)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 342, |
|
"text": "(Nema et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 361, |
|
"text": "The DUC 2005 and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 362, |
|
"end": 375, |
|
"text": "2006 datasets", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 387, |
|
"text": "(Dang, 2005", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 388, |
|
"end": 401, |
|
"text": "(Dang, , 2006", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 643, |
|
"end": 667, |
|
"text": "(Deutsch and Roth, 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In parallel, there has been growing work in recent years on reinforcement learning approaches to (generic) abstractive summarization. Proposed rewards aim to optimize non-differentiable summarization metrics like ROUGE (Lin, 2004) and BERTSCORE (Zhang et al., 2019) , or to encourage desirable summary aspects like semantic cohe-sion (Celikyilmaz et al., 2018) and entity coherence (Sharma et al., 2019) . Many reinforced abstractive summarization methods use the self-critical baseline or SCST (Rennie et al., 2017) to cap their rewards. This self-critical baseline is obtained by greedily searching for a sequence that maximizes the likelihood probability of the current model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 219, |
|
"end": 230, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 245, |
|
"end": 265, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 360, |
|
"text": "(Celikyilmaz et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 403, |
|
"text": "(Sharma et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 516, |
|
"text": "SCST (Rennie et al., 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we propose a reinforcement learning-based approach to topic-focused summarization. First, we adapt widely used generic summarization benchmarks to this task, such that we aim to generate only one out of three summary sentences, given a corresponding topic. Then, instead of using the self-critical baseline, we introduce a novel baseline that uses negative examples: a sentence that contains information that the summarization model should not focus on.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We run experiments on two existing generic summarization datasets adapted to our task: CNN-Daily Mail (Hermann et al., 2015; Nallapati et al., 2017) and New York Times (Sandhaus, 2008) . Our experiments span two existing rewards: the popular ROUGE-L reward and the Distributed Semantic Reward (DSR) of Li et al. (2019) , inspired by BERTSCORE (Zhang et al., 2019) . Our results show that using our negative example baseline outperforms the self-critical baseline across both datasets and both rewards. We obtain improvements on both datasets in ROUGE and BERTSCORE metrics, and human annotators find that summaries generated with our negative baseline for rewards are generally more relevant to the given topic.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 124, |
|
"text": "CNN-Daily Mail (Hermann et al., 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 148, |
|
"text": "Nallapati et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 184, |
|
"text": "(Sandhaus, 2008)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 318, |
|
"text": "Li et al. (2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 343, |
|
"end": 363, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There are different definitions of topic-focused summarization. The DUC datasets (Dang, 2005 (Dang, , 2006 propose summarization of documents given a question, also called a query. Vanderwende et al. (2007) propose SumFocus, a system for topicfocused multi-document extractive summarization. SumFocus is comprised of four components: a generic extractive summarization system, a topicfocusing component, sentence simplification, and lexical expansion of topic words. Deutsch and Roth (2019) define the task of summary cloze as the problem of deciding which content to select in topic-focused summarization, given a context (partial summary). They propose a neural model with separate encoders for the topic and the partial summary.", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 92, |
|
"text": "(Dang, 2005", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 106, |
|
"text": "(Dang, , 2006", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 206, |
|
"text": "Vanderwende et al. (2007)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 490, |
|
"text": "Deutsch and Roth (2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topic-Focused Summarization", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "There is a growing body of work that use reinforcement learning (RL) methods to optimize nondifferentiable rewards. ROUGE scores remain a popular RL reward. Other rewards include sentence selection to improve ROUGE scores (Chen and Bansal, 2018; Pasunuru and Bansal, 2018) , optimizing question answering metrics (Scialom et al., 2019) , and adding desirable custom features to generated summaries (B\u00f6hm et al., 2019; Sharma et al., 2019) . Li et al. (2019) find that rewards based on BERTSCORE (Zhang et al., 2019) and ROUGE each optimize their own metric, but decrease the other one. Wang et al. (2018) introduce a topic-aware reinforced summarization model. The authors experiment with generic -not topic-focused -summarization datasets, and infer topics using LDA (Blei et al., 2003) . Information about the topics is then infused into the model using a topic-aware attention mechanism and topic embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 222, |
|
"end": 245, |
|
"text": "(Chen and Bansal, 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 246, |
|
"end": 272, |
|
"text": "Pasunuru and Bansal, 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 335, |
|
"text": "(Scialom et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 417, |
|
"text": "(B\u00f6hm et al., 2019;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 438, |
|
"text": "Sharma et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 457, |
|
"text": "Li et al. (2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 495, |
|
"end": 515, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 586, |
|
"end": 604, |
|
"text": "Wang et al. (2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 768, |
|
"end": 787, |
|
"text": "(Blei et al., 2003)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning for Summarization", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Whereas a few (Narayan et al., 2018; Li et al., 2018) use the REINFORCE algorithm (Williams, 1992), many RL-based summarization approaches (Paulus et al., 2018; Pasunuru and Bansal, 2018; Li et al., 2018; Celikyilmaz et al., 2018; Yang et al., 2018; Li et al., 2019) use the self-critical sequence training approach (SCST) (Rennie et al., 2017).", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 36, |
|
"text": "(Narayan et al., 2018;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 37, |
|
"end": 53, |
|
"text": "Li et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 160, |
|
"text": "(Paulus et al., 2018;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 187, |
|
"text": "Pasunuru and Bansal, 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 204, |
|
"text": "Li et al., 2018;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 230, |
|
"text": "Celikyilmaz et al., 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 249, |
|
"text": "Yang et al., 2018;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 266, |
|
"text": "Li et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reinforcement Learning for Summarization", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We tackle the task of topic-focused abstractive summarization as the problem of producing an abstractive summary focused on a given topic, a phrase of one or multiple words. The generated summary should include information from the input text that is related to the topic, and exclude all other infor-mation. Consequently, different topics with the same input text should yield different summaries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "More formally, given an input text x, a topic t and a corresponding reference summary y, we aim to maximize the probability that we generate the right summary:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(y|x, t) > p(y |x, t)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Problem Statement", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "for all y = y.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In self-critical sequence learning (Rennie et al., 2017; Wang et al., 2018) , the RL loss formula is as follows for our task:", |
|
"cite_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 56, |
|
"text": "(Rennie et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 57, |
|
"end": 75, |
|
"text": "Wang et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards with a Negative Example", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "LRL = \u2212 (r(y s ) \u2212 b) N i=1 logP (y s i |y s 1 , ..., y s i\u22121 , x, t) (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards with a Negative Example", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "where b is the RL baseline, and b = r(\u0177) in selfcritical sequence learning. y s is a sampled summary, and\u0177 is a summary obtained greedily by maximizing the probability of the overall sequence. In our particular task, we propose to use a baseline with a negative example. The intuition is that we encourage the model to generate summaries that are more similar to the reference summary than the negative example. This negative example is an independent sentence from the summary of the corresponding source text, but which does not contain the topic. The negative example acts as a sample of undesirable information, and helps the summarization model learn what kind of information to exclude.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards with a Negative Example", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Given a reference summary y, and a negative summary\u0233, our RL loss with negative examples is defined as in equation 2, where we define the RL baseline: b = r(\u0233).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards with a Negative Example", |
|
"sec_num": "4" |
|
}, |
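{

"text": "As a concrete illustration, the following is a minimal PyTorch-style sketch of the loss in equation 2 with the negative example baseline; it is not the authors' code, and sample_summary (returning sampled token ids and their log-probabilities) and reward_fn are assumed helpers.\ndef rl_loss_with_negative_baseline(model, source, topic, reference, negative, reward_fn):\n    # Sample a summary y^s from the current policy and keep its per-token log-probabilities.\n    sampled_ids, log_probs = sample_summary(model, source, topic)  # log_probs: tensor of shape (N,)\n    # Reward of the sampled summary against the reference (e.g., ROUGE-L or F_BERT).\n    r_sample = reward_fn(sampled_ids, reference)\n    # Baseline b = r(y-bar): reward of the negative example, an off-topic sentence of the reference summary.\n    b = reward_fn(negative, reference)\n    # Equation 2: L_RL = -(r(y^s) - b) * sum_i log P(y^s_i | y^s_1, ..., y^s_{i-1}, x, t)\n    return -(r_sample - b) * log_probs.sum()\nReplacing b with the reward of a greedily decoded summary recovers the self-critical baseline.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Rewards with a Negative Example",

"sec_num": "4"

},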
|
{ |
|
"text": "We apply our method on two popular rewards for summarization: rewards based on the ROUGE and BERTSCORE metrics between the sampled summary and the reference summary. ROUGE-L reward. Given a sampled summary y s and a reference summary y, we define the ROUGE-L reward as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "r R (y s ) = ROUGE(y s , y)", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Rewards", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "BERTSCORE Reward. We adopt the Distributed Semantic Reward (DSR) definition of Li et al. (2019) . This reward measures the semantic similarity with the reference summary. It is defined as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 95, |
|
"text": "Li et al. (2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "r s (y s ) = F BERT (y s , y)", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Rewards", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where F BERT is the F1 formula of BERTScore (Zhang et al., 2019) . It is defined as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 64, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "F BERT = 2 * P BERT * R BERT P BERT + R BERT (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where the precision P BERT and recall R BERT are defined as follows for a given reference y and candidate y :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "P BERT = 1 |y | x j \u2208y max x i \u2208y x ix j (6) R BERT = 1 |y| x i \u2208y max x j \u2208y", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "x ix j (7)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Rewards", |
|
"sec_num": "4.1" |
|
}, |
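{

"text": "To make the reward concrete, the following is an illustrative NumPy sketch of the greedy-matching computation in equations 5-7; it is an assumption for exposition, not the reference BERTSCORE implementation, and it presumes the contextual token embeddings are already computed and L2-normalized.\nimport numpy as np\n\ndef f_bert(ref_emb, cand_emb):\n    # ref_emb: |y| x d reference token embeddings; cand_emb: |y'| x d candidate token embeddings.\n    # Both are assumed L2-normalized, so inner products are cosine similarities.\n    sim = ref_emb @ cand_emb.T                      # pairwise similarities x_i^T x'_j\n    p_bert = sim.max(axis=0).mean()                 # equation 6: best match in y for each token of y'\n    r_bert = sim.max(axis=1).mean()                 # equation 7: best match in y' for each token of y\n    return 2 * p_bert * r_bert / (p_bert + r_bert)  # equation 5",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Rewards",

"sec_num": "4.1"

},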
|
{ |
|
"text": "Whereas Pasunuru and Bansal (2018) train by alternating multiple rewards, Li et al. 2019propose a single loss formula combining DSR and ROUGE rewards. However, their results show that combining DSR and ROUGE does not yield better results in either ROUGE or F BERT scores, compared to using only one reward at a time, along with the summarization loss. We decide to also use only one reward at a time. We aim to optimize the following loss function:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Formula", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L = \u2212(1 \u2212 \u03b3) * log p (y|x, t) +\u03b3 * L RL\u2212N (y s , y,\u0233)", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Loss Formula", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where \u03b3 is a hyperparameter, and the first term is the negative log-likelihood loss with the reference summary as the target.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Formula", |
|
"sec_num": "4.2" |
|
}, |
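{

"text": "As a short sketch of how the two terms are mixed (assumed variable names, not the authors' code):\ndef total_loss(nll_loss, rl_loss, gamma):\n    # nll_loss = -log p(y | x, t), the cross-entropy of the reference summary given topic and source;\n    # rl_loss  = L_RL-N, the reinforcement term of equation 2 with the negative-example baseline;\n    # gamma is set close to 1 (0.9984 in Section 5.2) to balance the magnitude difference of the two terms.\n    return (1.0 - gamma) * nll_loss + gamma * rl_loss",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Loss Formula",

"sec_num": "4.2"

},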
|
{ |
|
"text": "In our experiments, we use two popular summarization benchmarks: the non-anoymized CNN-Daily Mail dataset (Hermann et al., 2015; Nallapati et al., 2017) and the New York Times (NYT) dataset (Sandhaus, 2008 the \"highlights\" at the top of the article on the original website. We consider each sentence separately as a reference summary, thereby creating on average 3 datapoints from 1 datapoint in the original dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 128, |
|
"text": "(Hermann et al., 2015;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 129, |
|
"end": 152, |
|
"text": "Nallapati et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 205, |
|
"text": "(Sandhaus, 2008", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We filter the large NYT dataset to only get articles with 3-sentence summaries. Similarly to the CNN-Daily Mail dataset, summary sentences are independent, making them fit for topic-focused abstractive summarization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We use TopicRank (Bougouin et al., 2013) , a keyphrase extraction algorithm, to get the 10 most popular keyphrases of the input text. Out of these 10 keyphrases, we pick the highest-scoring one which only appears in the specific summary sentence y i to be the topic of y i . We consider one of the other reference summary sentences of the same input text as the negative example. Therefore, each datapoint in our version of the datasets contains an input text, a reference summary, a negative summary, and a topic. We make sure that no same input text (article) appears in more than one data split. We show the statistics of the dataset used in Table 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 40, |
|
"text": "(Bougouin et al., 2013)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 645, |
|
"end": 653, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "5.1" |
|
}, |
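{

"text": "A minimal sketch of this datapoint construction follows; it is an illustration under assumptions, where topicrank_keyphrases stands in for a TopicRank keyphrase extractor returning (phrase, score) pairs sorted by score, and the names are not from the authors' code.\ndef build_datapoints(article, summary_sentences):\n    keyphrases = topicrank_keyphrases(article, top_n=10)  # 10 highest-ranked (phrase, score) pairs\n    datapoints = []\n    for i, sentence in enumerate(summary_sentences):\n        others = [s for j, s in enumerate(summary_sentences) if j != i]\n        # Topic: the highest-scoring keyphrase that appears only in this summary sentence.\n        topic = next((kp for kp, score in keyphrases\n                      if kp in sentence and all(kp not in s for s in others)), None)\n        if topic is None or not others:\n            continue\n        # Negative example: another, independent sentence of the same reference summary.\n        datapoints.append({'source': article, 'topic': topic,\n                           'reference': sentence, 'negative': others[0]})\n    return datapoints",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Datasets",

"sec_num": "5.1"

},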
|
{ |
|
"text": "We set \u03b3 = 0.9984 following Paulus et al. (2018) to balance the magnitude difference. We adopt the BART Large architecture (Lewis et al., 2019) as it set a state of the art in the generic summarization of the CNN-Daily Mail dataset, among other tasks. We use a learning rate of 3e \u2212 5. We start training from the best model trained on the cross-entropy objective only, on our topic-focused summarization datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 48, |
|
"text": "Paulus et al. (2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 143, |
|
"text": "(Lewis et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We prepend the topic and a separator token to the input text of each datapoint. As the average reference summary is shorter (on average 13 tokens), we set the length of generated summaries at test time between 10 and 20 tokens. Table 3 : Human evaluation ratings of two annotators on 40 sampled summaries from each dataset, comparing reinforced summarization models trained with the negative baseline (ours) vs. the self-critical baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 235, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "5.2" |
|
}, |
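{

"text": "For illustration, a sketch of the input construction and decoding constraints, assuming a Hugging Face-style BART interface; the exact separator token and generation settings are our assumptions, not details specified by the paper.\nfrom transformers import BartForConditionalGeneration, BartTokenizer\n\ntokenizer = BartTokenizer.from_pretrained('facebook/bart-large')\nmodel = BartForConditionalGeneration.from_pretrained('facebook/bart-large')\n\ndef summarize(topic, article):\n    # Prepend the topic and a separator token to the source text.\n    inputs = tokenizer(topic + ' </s> ' + article, return_tensors='pt', truncation=True)\n    # Reference summaries are short (13 tokens on average), so bound generation length at test time.\n    output_ids = model.generate(**inputs, min_length=10, max_length=20)\n    return tokenizer.decode(output_ids[0], skip_special_tokens=True)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training Details",

"sec_num": "5.2"

},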
|
{ |
|
"text": "We train four extractive baseline models and three abstractive summarization baseline models. The four extractive baseline models are common summarization baselines, that are meant to give an idea about the difficulty of the task. The first baseline model is Lead-1, which chooses the first sentence from the source article as the generated summary. The second baseline model is BM25 (Robertson et al., 1995) , an IR-based score to rank search results given the query. In our case, BM25 ranks sentences of the source article given the topic, and outputs the most relevant sentence as the generated summary. The third baseline model is SumFocus (Vanderwende et al., 2007) , an unsupervised probabilistic model for topic-focused summarization. The fourth baseline is Oracle-1, which greedily searches for the sentence with the highest ROUGE score with the reference summary. This baseline model is meant as an upper-bound for extractive summarization models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 379, |
|
"end": 408, |
|
"text": "BM25 (Robertson et al., 1995)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 644, |
|
"end": 670, |
|
"text": "(Vanderwende et al., 2007)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Models", |
|
"sec_num": "5.3" |
|
}, |
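{

"text": "For concreteness, a brief sketch of two of these extractive baselines; it is illustrative only, assuming a BM25 implementation such as the rank_bm25 package and an assumed rouge_l scoring helper.\nfrom rank_bm25 import BM25Okapi\n\ndef bm25_baseline(article_sentences, topic):\n    # Rank source sentences by BM25 relevance to the topic and return the best one.\n    bm25 = BM25Okapi([s.split() for s in article_sentences])\n    scores = bm25.get_scores(topic.split())\n    return article_sentences[int(scores.argmax())]\n\ndef oracle_1_baseline(article_sentences, reference, rouge_l):\n    # Extractive upper bound: the source sentence with the highest ROUGE against the reference.\n    return max(article_sentences, key=lambda s: rouge_l(s, reference))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Baseline Models",

"sec_num": "5.3"

},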
|
{ |
|
"text": "We experiment with three abstractive summarization baseline models that use BART Large. The first baseline model is trained on cross-entropy only. The second and third baseline models are trained with the ROUGE and BERTSCORE (DSR) rewards respectively, with the self-critical method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Models", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We show the results of our experiments in Tables 2. Our proposed approach outperforms the cross-entropy-only BART baseline, but also the two self-critical approaches across both datasets. This shows that negative examples are a good reward baseline in topic-focused summarization. We notice a significant jump in performance in ROUGE-L F1 especially (about 2.5 points), and an increase in ROUGE-1 and ROUGE-2 as well, compared to the cross-entropy-only baseline. Our BERTSCORErewarded model achieves the highest F BERT scores, with a slight increase from its self-critical counterpart.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We hire two annotators to judge the fluency and topic relevance of the 40 sampled summary pairs, and therefore get 80 evaluations for each criterion. We ask the two annotators to compare our models with their self-critical counterparts. The annotators are not informed about which model generated which summary. Results in Table 3 show that our model's summaries are generally more relevant to the topic, and that our BERTSCORE models are more fluent.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 323, |
|
"end": 330, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We propose a deep reinforcement learning approach to topic-focused abstractive summarization, where we aim to generate summaries focused on a given phrase of one or multiple words. We introduce a new baseline for rewards, based on negative examples collected from independent summary sentences. We show through experiments that our proposed approach outperforms the baseline of selfcritical reinforcement learning in the optimized reward metric, and human annotators find our model generates summaries that are more relevant to the topic.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Better rewards yield better summaries: Learning to summarise without references", |
|
"authors": [ |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "B\u00f6hm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Christian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ori", |
|
"middle": [], |
|
"last": "Meyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Shapira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3101--3111", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Florian B\u00f6hm, Yang Gao, Christian M Meyer, Ori Shapira, Ido Dagan, and Iryna Gurevych. 2019. Bet- ter rewards yield better summaries: Learning to sum- marise without references. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3101-3111.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Topicrank: Graph-based topic ranking for keyphrase extraction", |
|
"authors": [ |
|
{ |
|
"first": "Adrien", |
|
"middle": [], |
|
"last": "Bougouin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Boudin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B\u00e9atrice", |
|
"middle": [], |
|
"last": "Daille", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adrien Bougouin, Florian Boudin, and B\u00e9atrice Daille. 2013. Topicrank: Graph-based topic ranking for keyphrase extraction.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Deep communicating agents for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Asli", |
|
"middle": [], |
|
"last": "Celikyilmaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bosselut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1662--1675", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Asli Celikyilmaz, Antoine Bosselut, Xiaodong He, and Yejin Choi. 2018. Deep communicating agents for abstractive summarization. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long Pa- pers), pages 1662-1675.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Fast abstractive summarization with reinforce-selected sentence rewriting", |
|
"authors": [ |
|
{ |
|
"first": "Yen-Chun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "675--686", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yen-Chun Chen and Mohit Bansal. 2018. Fast abstrac- tive summarization with reinforce-selected sentence rewriting. In Proceedings of the 56th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 675-686.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Overview of duc", |
|
"authors": [ |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"Trang" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the document understanding conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hoa Trang Dang. 2005. Overview of duc 2005. In Pro- ceedings of the document understanding conference, pages 1-12.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Overview of duc", |
|
"authors": [ |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"Trang" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the document understanding conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hoa Trang Dang. 2006. Overview of duc 2006. In Pro- ceedings of the document understanding conference.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Summary cloze: A new task for content selection in topic-focused summarization", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Deutsch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3711--3720", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Deutsch and Dan Roth. 2019. Summary cloze: A new task for content selection in topic-focused summarization. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 3711-3720.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Measuring importance and query relevance in topicfocused multi-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "Surabhi", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "193--196", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Surabhi Gupta, Ani Nenkova, and Dan Jurafsky. 2007. Measuring importance and query relevance in topic- focused multi-document summarization. In Pro- ceedings of the 45th Annual Meeting of the Associa- tion for Computational Linguistics Companion Vol- ume Proceedings of the Demo and Poster Sessions, pages 193-196.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Teaching machines to read and comprehend", |
|
"authors": [ |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Moritz Hermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Kocisky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lasse", |
|
"middle": [], |
|
"last": "Espeholt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Kay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mustafa", |
|
"middle": [], |
|
"last": "Suleyman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1693--1701", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefen- stette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. In Advances in neural information processing systems, pages 1693-1701.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal ; Abdelrahman Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ves", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.13461" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. 2019. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Actor-critic based training framework for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Piji", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1803.11070" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piji Li, Lidong Bing, and Wai Lam. 2018. Actor-critic based training framework for abstractive summariza- tion. arXiv preprint arXiv:1803.11070.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Deep reinforcement learning with distributional semantic rewards for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Siyao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deren", |
|
"middle": [], |
|
"last": "Lei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pengda", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6038--6044", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1623" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siyao Li, Deren Lei, Pengda Qin, and William Yang Wang. 2019. Deep reinforcement learning with dis- tributional semantic rewards for abstractive summa- rization. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 6038-6044, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Rouge: A package for automatic evaluation of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Text summarization branches out", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "74--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Summarunner: a recurrent neural network based sequence model for extractive summarization of documents", |
|
"authors": [ |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feifei", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3075--3081", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramesh Nallapati, Feifei Zhai, and Bowen Zhou. 2017. Summarunner: a recurrent neural network based se- quence model for extractive summarization of docu- ments. In Proceedings of the Thirty-First AAAI Con- ference on Artificial Intelligence, pages 3075-3081.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Ranking sentences for extractive summarization with reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Shay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1747--1759", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Narayan, Shay B Cohen, and Mirella Lapata. 2018. Ranking sentences for extractive summariza- tion with reinforcement learning. In Proceedings of the 2018 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Pa- pers), pages 1747-1759.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Diversity driven attention model for query-based abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Preksha", |
|
"middle": [], |
|
"last": "Nema", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Mitesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anirban", |
|
"middle": [], |
|
"last": "Khapra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Balaraman", |
|
"middle": [], |
|
"last": "Laha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ravindran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1063--1072", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Preksha Nema, Mitesh M Khapra, Anirban Laha, and Balaraman Ravindran. 2017. Diversity driven atten- tion model for query-based abstractive summariza- tion. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 1063-1072.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Multireward reinforced summarization with saliency and entailment", |
|
"authors": [ |
|
{ |
|
"first": "Ramakanth", |
|
"middle": [], |
|
"last": "Pasunuru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "646--653", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramakanth Pasunuru and Mohit Bansal. 2018. Multi- reward reinforced summarization with saliency and entailment. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 646- 653.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A deep reinforced model for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Romain", |
|
"middle": [], |
|
"last": "Paulus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Romain Paulus, Caiming Xiong, and Richard Socher. 2018. A deep reinforced model for abstractive sum- marization. In International Conference on Learn- ing Representations.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Self-critical sequence training for image captioning", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Steven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Etienne", |
|
"middle": [], |
|
"last": "Rennie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Youssef", |
|
"middle": [], |
|
"last": "Marcheret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jerret", |
|
"middle": [], |
|
"last": "Mroueh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vaibhava", |
|
"middle": [], |
|
"last": "Ross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7008--7024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven J Rennie, Etienne Marcheret, Youssef Mroueh, Jerret Ross, and Vaibhava Goel. 2017. Self-critical sequence training for image captioning. In Proceed- ings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7008-7024.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Okapi at trec-3", |
|
"authors": [ |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Stephen E Robertson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Susan", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Micheline", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Hancock-Beaulieu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gatford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Nist Special Publication Sp", |
|
"volume": "109", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen E Robertson, Steve Walker, Susan Jones, Micheline M Hancock-Beaulieu, Mike Gatford, et al. 1995. Okapi at trec-3. Nist Special Publication Sp, 109:109.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The new york times annotated corpus. Linguistic Data Consortium", |
|
"authors": [ |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Sandhaus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Evan Sandhaus. 2008. The new york times annotated corpus. Linguistic Data Consortium, Philadelphia, 6(12):e26752.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Answers unite! unsupervised metrics for reinforced summarization models", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Scialom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Lamprier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Piwowarski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacopo", |
|
"middle": [], |
|
"last": "Staiano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.01610" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Scialom, Sylvain Lamprier, Benjamin Pi- wowarski, and Jacopo Staiano. 2019. Answers unite! unsupervised metrics for reinforced summa- rization models. arXiv preprint arXiv:1909.01610.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "An entity-driven framework for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luyang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3271--3282", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eva Sharma, Luyang Huang, Zhe Hu, and Lu Wang. 2019. An entity-driven framework for abstractive summarization. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 3271-3282.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Beyond sumbasic: Taskfocused summarization with sentence simplification and lexical expansion", |
|
"authors": [ |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Vanderwende", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hisami", |
|
"middle": [], |
|
"last": "Suzuki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Information Processing & Management", |
|
"volume": "43", |
|
"issue": "6", |
|
"pages": "1606--1618", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucy Vanderwende, Hisami Suzuki, Chris Brockett, and Ani Nenkova. 2007. Beyond sumbasic: Task- focused summarization with sentence simplification and lexical expansion. Information Processing & Management, 43(6):1606-1618.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Using only cross-document relationships for both generic and topic-focused multidocument summarizations", |
|
"authors": [ |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Information Retrieval", |
|
"volume": "11", |
|
"issue": "1", |
|
"pages": "25--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaojun Wan. 2008. Using only cross-document rela- tionships for both generic and topic-focused multi- document summarizations. Information Retrieval, 11(1):25-49.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Graph-based multi-modality learning for topic-focused multidocument summarization", |
|
"authors": [ |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianguo", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Twenty-First International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaojun Wan and Jianguo Xiao. 2009. Graph-based multi-modality learning for topic-focused multi- document summarization. In Twenty-First Inter- national Joint Conference on Artificial Intelligence. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Manifold-ranking based topic-focused multidocument summarization", |
|
"authors": [ |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianwu", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianguo", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "IJCAI", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "2903--2908", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaojun Wan, Jianwu Yang, and Jianguo Xiao. 2007. Manifold-ranking based topic-focused multi- document summarization. In IJCAI, volume 7, pages 2903-2908.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A reinforced topic-aware convolutional sequence-to-sequence model for abstractive text summarization", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junlin", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunzhe", |
|
"middle": [], |
|
"last": "Tao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4453--4460", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Wang, Junlin Yao, Yunzhe Tao, Li Zhong, Wei Liu, and Qiang Du. 2018. A reinforced topic-aware con- volutional sequence-to-sequence model for abstrac- tive text summarization. In Proceedings of the 27th International Joint Conference on Artificial Intelli- gence, pages 4453-4460.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Simple statistical gradientfollowing algorithms for connectionist reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ronald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Machine learning", |
|
"volume": "8", |
|
"issue": "3-4", |
|
"pages": "229--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronald J Williams. 1992. Simple statistical gradient- following algorithms for connectionist reinforce- ment learning. Machine learning, 8(3-4):229-256.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Aspect and sentiment aware abstractive review summarization", |
|
"authors": [ |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ying", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jia", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th international conference on computational linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1110--1120", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Min Yang, Qiang Qu, Ying Shen, Qiao Liu, Wei Zhao, and Jia Zhu. 2018. Aspect and sentiment aware ab- stractive review summarization. In Proceedings of the 27th international conference on computational linguistics, pages 1110-1120.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Bertscore: Evaluating text generation with bert", |
|
"authors": [ |
|
{ |
|
"first": "Tianyi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varsha", |
|
"middle": [], |
|
"last": "Kishore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Kilian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. Bertscore: Eval- uating text generation with bert. In International Conference on Learning Representations.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Statistics for our focused version of the CNN-Daily Mail and NYT datasets.", |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Summarization experiment results on the test sets with the ROUGE and BERTSCORE rewards. The model with no reward is a summarizer model trained only on the cross-entropy objective.", |
|
"html": null, |
|
"content": "<table><tr><td>DATASET</td><td/><td colspan=\"2\">CNN-DAILY MAIL</td><td/><td colspan=\"2\">NEW YORK TIMES</td><td/></tr><tr><td>REWARD</td><td>CRITERIA</td><td colspan=\"6\">Negative Self-Critical Tie Negative Self-Critical Tie</td></tr><tr><td>ROUGE</td><td>Relevance to Topic Fluency</td><td>25 17</td><td>21 21</td><td>34 42</td><td>21 14</td><td>14 12</td><td>45 54</td></tr><tr><td>BERTSCORE</td><td>Relevance to Topic Fluency</td><td>19 16</td><td>15 11</td><td>46 51</td><td>22 18</td><td>18 15</td><td>40 47</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |