|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:47:27.660122Z" |
|
}, |
|
"title": "TLDR9+: A Large Scale Resource for Extreme Summarization of Social Media Posts", |
|
"authors": [ |
|
{ |
|
"first": "Sajad", |
|
"middle": [], |
|
"last": "Sotudeh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "IRLab", |
|
"institution": "Georgetown University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hanieh", |
|
"middle": [], |
|
"last": "Deilamsalehy", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Franck", |
|
"middle": [], |
|
"last": "Dernoncourt", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Goharian", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "IRLab", |
|
"institution": "Georgetown University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Recent models in developing summarization systems consist of millions of parameters and the model performance is highly dependent on the abundance of training data. While most existing summarization corpora contain data in the order of thousands to one million, generation of large-scale summarization datasets in order of couple of millions is yet to be explored. Practically, more data is better at generalizing the training patterns to unseen data. In this paper, we introduce TLDR9+-a largescale summarization dataset-containing over 9 million training instances extracted from Reddit discussion forum (https://github. com/sajastu/reddit_collector). This dataset is specifically gathered to perform extreme summarization (i.e., generating onesentence summary in high compression and abstraction) and is more than twice larger than the previously proposed dataset. We go one step further and with the help of human annotations, we distill a more finegrained dataset by sampling High-Quality instances from TLDR9+ and call it TLDRHQ dataset. We further pinpoint different state-ofthe-art summarization models on our proposed datasets.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Recent models in developing summarization systems consist of millions of parameters and the model performance is highly dependent on the abundance of training data. While most existing summarization corpora contain data in the order of thousands to one million, generation of large-scale summarization datasets in order of couple of millions is yet to be explored. Practically, more data is better at generalizing the training patterns to unseen data. In this paper, we introduce TLDR9+-a largescale summarization dataset-containing over 9 million training instances extracted from Reddit discussion forum (https://github. com/sajastu/reddit_collector). This dataset is specifically gathered to perform extreme summarization (i.e., generating onesentence summary in high compression and abstraction) and is more than twice larger than the previously proposed dataset. We go one step further and with the help of human annotations, we distill a more finegrained dataset by sampling High-Quality instances from TLDR9+ and call it TLDRHQ dataset. We further pinpoint different state-ofthe-art summarization models on our proposed datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Text summarization is defined as generating a concise sequence of text as summary, given relatively a longer document as source. A high-quality summary conveys the most important points of its associated source. The task is generally performed in two ways: 1) extractive in which salient sentences are identified and concatenated to form the final summary (Nallapati et al., 2017; Dong et al., 2018; Sotudeh et al., 2021a; Narayan et al., 2020; Cho et al., 2020) ; and 2) abstractive that produces a paraphrasing of the main contents of the given text. (See et al., 2017; Gehrmann et al., 2018 ; MacAvaney *Work done during the internship at Adobe Research.", |
|
"cite_spans": [ |
|
{ |
|
"start": 356, |
|
"end": 380, |
|
"text": "(Nallapati et al., 2017;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 399, |
|
"text": "Dong et al., 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 422, |
|
"text": "Sotudeh et al., 2021a;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 444, |
|
"text": "Narayan et al., 2020;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 462, |
|
"text": "Cho et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 553, |
|
"end": 571, |
|
"text": "(See et al., 2017;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 593, |
|
"text": "Gehrmann et al., 2018", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We go to school together, we have three lessons a week together. She normally sits at the front and I sit at the back, but recently the person I sit next to has been struggling with mental health and hasn't been in, so I moved and sit next to her most lessons. We also do this engineering scheme together, so we have maybe half an hour a week with two other people working on that. For a while now we 've texted each other a few times a week with pictures of our cats, since we both love them. Outside of that, we don't really hang out at all. I see a lot of theatre, and about a week ago she said she wanted to come see something with me. So I agree, I love showing people theatre. When we find our seats, mine has a pole in the way so I can't see a section of the stage unless I lean away from her, but her view is perfect. About half an hour in, she leans on my shoulder. Halfway through act 2 she starts hugging my arm, while still leaning on my shoulder. She was kind of cuddling all day, we went to an arcade earlier as well. She doesn't seem like the cuddling type of friend, and I'm very worried she has a crush on me. I don't want to ruin a friendship, I don't like her back. Should I just ignore it until she asks me? What if she thinks that was a date? TL;DR I took my friend to see a show, she leant on my shoulder the whole time. I 'm not into her but I think she has a crush on me? Figure 1 : An example Reddit post with TLDR summary. As seen, the TLDR summary is extremely short, and highly abstractive. Zhang et al., 2019; Sotudeh et al., 2020a; Lewis et al., 2020; Lebanoff et al., 2020) and is considered more challenging as the model needs to deal with novel words generation beyond sentence extraction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1519, |
|
"end": 1538, |
|
"text": "Zhang et al., 2019;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 1539, |
|
"end": 1561, |
|
"text": "Sotudeh et al., 2020a;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1562, |
|
"end": 1581, |
|
"text": "Lewis et al., 2020;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1582, |
|
"end": 1604, |
|
"text": "Lebanoff et al., 2020)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1396, |
|
"end": 1404, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Over the past few years, different neural models including RNN (Hochreiter and Schmidhuber, 1997) and Transformer-based (Vaswani et al., 2017) networks have been proposed to facilitate the summarization task. While promising, the performance of such models is bound to the abundance of training data due to the massive model complexity (Ying, 2019) . Lack of sufficient training data worsens the model's ability to generalize patterns in training data to unseen data (Althnian et al., 2021) . In addition, overfitting will be likely inevitable as the model is forced to learn from a limited set of data; hence, hindering the generalization. This justifies the necessity of large-scale corpora for training large and complex models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 97, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 142, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 348, |
|
"text": "(Ying, 2019)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 490, |
|
"text": "(Althnian et al., 2021)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Prevalence of social media platforms has provided communities with an opportunity to ex- change different types of data while interacting with each other. Reddit 1 is one of such popular platforms where users post their content of interest in a variety of domains. TLDR, acronym for \"Too Long; Didn't Read\", is a common practice that aims at removing unnecessary information from the lengthy post, and presenting its gist information in a few words. Figure 1 shows a sample of Reddit post with its TLDR, which aims at abstracting post with extreme compression. Abundance of posts that contain such TLDRs during the recent years has given rise to generation of data collections that can be utilized for training deep neural networks; hence, addressing the challenge of largescale datasets' scarcity. Despite the possibility of acquiring large-scale datasets from social media platforms, training deep neural networks on such datasets is yet challenging. This might be due to the specific writing style of social media content such as informal language and massive noise within such content (Sotudeh et al., 2020b) . Table 1 shows some of the existing summarization datasets in social and non-social media domains. These datasets are specifically proposed for extreme summarization task, where the aim is to produce one to two summary sentences in extreme compression and high abstraction. In this paper, we introduce our dataset, TLDR9+ with over 9 millions instances which is more than twice larger than the previous dataset (V\u00f6lske et al., 2017) . We further sample high-quality instances in virtue of human annotations from TLDR9+ to construct TL-DRHQ yielding 1.7 million instances in the hope of providing firm grounds for future work. Owing to extremely short length of TLDR summaries (less that 40 words), our datasets are rather suitable for extreme summarization task, than for longer ones.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1089, |
|
"end": 1112, |
|
"text": "(Sotudeh et al., 2020b)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 1525, |
|
"end": 1546, |
|
"text": "(V\u00f6lske et al., 2017)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 450, |
|
"end": 458, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1115, |
|
"end": 1122, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this research, we aim at harvesting instances that include TLDRs written by the Reddit users spanning the period of 2005-2021. Our early attempt at gathering such instances yields over 9 millions instances with TLDRs as the initial set (i.e., TLDR9+). Since social media posts are inherently noisy, we consider applying a heuristic method to cut out low-quality instances from the initial set, which ultimately results in 1.7 million high-quality instance. For deciding such heuristic, we employ human annotators to help obtaining a more finegrained dataset (i.e., TLDRHQ). Furthermore, we establish various state-of-the-art extractive and abstractive summarization models on our proposed datasets. Finally, we carry out an analysis over the results on both datasets to shed lights on future direction. We believe that our datasets can be utilized to pave the path for future research. Our miner code and data are made publicly available at https:// github.com/sajastu/reddit_collector, along with the licensing details included.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Over the past few years, summarization community has witnessed variety of summarization datasets in different domains (See et al., 2017; Cohan et al., 2018; Kornilova and Eidelman, 2019; Grusky et al., 2018; Sotudeh et al., 2021b) . While these collections have provided a fair basis to perform different neural text summarization models, the necessity of introducing large-scale collections, in magnitude of over 4 millions, has not been much explored.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 136, |
|
"text": "(See et al., 2017;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 137, |
|
"end": 156, |
|
"text": "Cohan et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 186, |
|
"text": "Kornilova and Eidelman, 2019;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 207, |
|
"text": "Grusky et al., 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 230, |
|
"text": "Sotudeh et al., 2021b)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Among the first attempts on this track, Rush et al. (2015) gathered the English Gigaword corpus (Graff et al., 2003) which contains around 4 millions article-headline pairs for the task of news headline generation. Researchers have noted that lead bias is the common phenomenon in most news datasets, where early parts of the article generally include the most important information (Kedzie et al., 2018; Zhu et al., 2019; Grenander et al., 2019) . To alleviate the lead bias for training summarization models, there have been recent efforts to propose summarization datasets, where the lead bias phenomenon is mitigated and summaries are sampled from diverse source regions. Amongst those, Sharma et al. (2019) proposed BIGPATENT, consisting 1.3 million patent documents, collected from Google Patents Public Datasets, with human- written abstractive summaries. Kim et al. (2019) proposed Reddit TIFU in which the abstractive gold summaries are sampled from diverse regions of the source document, rather than lead regions. Our proposed datasets are more suited for the task of extreme summarization (Narayan et al., 2018; Cachola et al., 2020) , where the task is to create a short one-sentence summary. To this end, Narayan et al. (2018) proposed XSUM dataset which is a real-word dataset compiling online articles from the British Broadcasting Corportation (BBC). TLDR generation task is also a new form of extreme summarization. Kim et al. (2019) collected Reddit-TIFU dataset, consisting of 120K posts from the online discussions from Reddit. Recent efforts have mined around 4 millions Reddit posts along with their TLDR summaries (V\u00f6lske et al., 2017) which resulted in Webis-TLDR-17 dataset. While our work is similar to theirs, our collected dataset is more than twice larger than the one previously proposed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 58, |
|
"text": "Rush et al. (2015)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 116, |
|
"text": "(Graff et al., 2003)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 383, |
|
"end": 404, |
|
"text": "(Kedzie et al., 2018;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 422, |
|
"text": "Zhu et al., 2019;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 446, |
|
"text": "Grenander et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 691, |
|
"end": 711, |
|
"text": "Sharma et al. (2019)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 863, |
|
"end": 880, |
|
"text": "Kim et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1101, |
|
"end": 1123, |
|
"text": "(Narayan et al., 2018;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1124, |
|
"end": 1145, |
|
"text": "Cachola et al., 2020)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1219, |
|
"end": 1240, |
|
"text": "Narayan et al. (2018)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1434, |
|
"end": 1451, |
|
"text": "Kim et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1638, |
|
"end": 1659, |
|
"text": "(V\u00f6lske et al., 2017)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Reddit is a social news aggregation, and discussion website platform that has been officially launched since June 2005. It supports some features specific to social platforms such as web content rating though up-voting, and discussion topics via subreddits. The user-created content can be of any domain such as News, Politics, Science, Sport and etc. Users can post or comment on a specific topic which falls into a specific subreddit. Within subreddits, users submit their post as submission, and others can react through commenting under the posted submission. Each submission and comment has a text body/selftext which reflects the users' information exchange regarding a specific topic. The existence of social platforms such as Reddit has provided the research community with an opportunity to experiment with resources that use informal language, rather than those in news, scientific or legal documents which use formal language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "TLDR-Too Long; Didn't Read-is a common practice in Reddit that often appears at the end of long reddit posts. It is denoted as an extremely short summary that urges users to read shorter version of a longer text when they do not have time to read the entire posts. Figure 2 shows the ratio of posts containing such TLDR summaries over the entire submitted posts (and comments) across different years. It is observable that although we see an ascending trend since 2005, the number of TLDRs remains fixed (see Section 3.4) while the number of posts increases drastically.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 273, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Pushshift 2 is a social media data repository platform that has been recently made available to NLP researchers (Baumgartner et al., 2020) . It contains recent and historical dumps of Reddit posts that are updated in real-time. In order to create the TLDR dataset, we downloaded the whole data dumps (submissions and comments) which covers the period of 2005-2021, and extracted instances that contain TLDRs within the posted source text. This mining process resulted in TLDR9+ dataset, that contains over 9 millions instances. To acquire a more fine-grained dataset, with the help of human annotations, we obtained TLDRHQ dataset, con-sisting of 1.7 millions high-quality instances. The datasets' construction details are discussed in what follows.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 138, |
|
"text": "(Baumgartner et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "TLDR9+. After downloading Reddit data dumps, we extract posts in which a mention of TLDR-style keywords is found. To find TLDR-style keywords within a given text, we declare a regular expression that matches words starting with \"TL\" and ending with \"DR\", with permission of having up to three characters in-between as also done by V\u00f6lske et al. (2017) . This stage yields the TLDR9+ dataset as the full corpus. At the next filtering stage, we utilize a heuristic method along with human supervision to narrow down to a more fine-grained dataset that contain high-quality instances. TLDRHQ. A few studies have noted that usergenerated content in social media platforms is noisy (Liu and Inkpen, 2015) in terms of having spams, bad grammar, and spelling errors. To filter out such noisy instances from the TLDR9+ dataset, we use a heuristic method to drop low-quality instances while retaining high-quality ones. To be more specific, given a post-TLDR pair, we firstly identify the highest score source sentence in terms of ROUGE-2 and ROUGE-L mean scores (i.e., oracle sentence). The choice of oracle sentence lies in the fact that we postulate to extract a sentence from the longer post that has the highest similarity with the TLDR summary as the gold standard. We then decide to either drop or retain the instance if the score surpasses a pre-defined threshold. We experiment with different thresholds of 0.15, 0.17, 0.20, 0.22 and 0.25, and choose one considering the annotations done by human annotators. The details of human annotation process is discussed in what follows.", |
|
"cite_spans": [ |
|
{ |
|
"start": 331, |
|
"end": 351, |
|
"text": "V\u00f6lske et al. (2017)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 677, |
|
"end": 699, |
|
"text": "(Liu and Inkpen, 2015)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets Construction: TLDR9+ and TLDRHQ", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "As mentioned earlier, we first define 5 fixed thresholds including 0.15, 0.17, 0.20, 0.22, and 0.25 to create 5 data subsets from TLDR9+ dataset. Specifically, we take TLDR9+ as the initial seed, from which 5 subsets is created as follows. To gather instances for each of the pre-defined thresholds, we check if the oracle sentence's score in the given instance surpasses the experimented threshold. If it does so, we add it to the subset, otherwise it is dropped. We then randomly sample 20 cases from each of these subsets with their oracle sentence and TLDR summaries, yielding 100 cases for annotation in total. We have four human annotators from our NLP group either confirm (labeling with 1) or reject (labeling with 0) if the oracle sentence validates the TLDR summary. By definition, the sentence validates the TLDR summary if at least one fragment can be found within the sentence that semantically occurs in TLDR summary. We further provide the instances' text (i.e., source) as the \"Context\" for the oracle sentence, and ask the annotators to confirm or reject if the context also validates the TLDR summary. Context is specifically important for the cases where the oracle sentence does not validate the TLDR summary. In fact, by providing context, we aspire to verify if an ideal summarizer is able to generate the TLDR using the context when the oracle sentence is not much informative. For tie cases 3 , we employ a fifth annotator to make the final decision. Table 2 : Average decision scores given by the annotators for each threshold. Table 2 presents the average decision score assigned to the samples on each threshold. The decision score for a given sample is defined as the annotators' average confidence at giving label 1 to that specific sample. If the average confidence score surpasses 0.50, we assign 1 and if it is below 0.50, the sample is annotated with 0. Otherwise, the fifth annotator decides the label. 
As shown, threshold 0.22 attains the full score in the presence and absence of the context. Overall, this shows that most of the annotators believe the TLDR can be distilled considering both oracle sentence and the entire source. Figure 3 shows pair-wise inter-rater S score agreement (Bennet et al., 1954) throughout the annotation process on threshold 0.22, denoting that annotators have mostly slight or fair agreement in labeling process. Specifically, when the context is not provided (i.e., merely with consideration of oracle sentence), raters (2, 4), (2, 3), and (1, 3) have 8.0%", |
|
"cite_spans": [ |
|
{ |
|
"start": 2222, |
|
"end": 2243, |
|
"text": "(Bennet et al., 1954)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1475, |
|
"end": 1482, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1553, |
|
"end": 1560, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2167, |
|
"end": 2175, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "10.3%", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "12.2%", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "10.4% 10.0% 10.2% 9.9%", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "11.9%", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "6.0%", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "TLDRs per year (Submissions) 2 0 0 9 2 0 1 0 2 0 1 1 2 0 1 2 2 0 1 3 2 0 1 4 2 0 1 5 2 0 1 6 2 0 1 7 2 0 1 8 2 0 1 9 2 0 2 0 10.2% 10.2% 10.3% 10.1% 11.0% 11.5% 11.8% 11.3%", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "TLDRs per year (Comments) (a) submission-TLDR (b) comment-TLDR Figure 4 : The proportion of instances containing TLDR in TLDR9+ dataset. As seen, the number of TLDRs is increasing each year. At the time of conducting this research, the submission data dumps are partially uploaded for 2021 (until 2021-06), while there is no comments uploaded for 2021 in the Pushshift repository.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 71, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "quite a high rate of agreement. On the other hand, most pairs of annotators including (1, 2), (1, 4), and (2, 4) achieve a high agreement rate when the context is given. As the given decision scores -either only with oracle sentence or provided contextsum up to 1.0, and considering moderately high agreement rate between the annotators, we decide to sample our TLDRHQ dataset from the instances in that was in threshold 0.22's subset. This leads us to choose human-decided threshold 0.22 as our ground to sample High-Quality TLDRs for constructing TLDRHQ dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Annotation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In this section, we give statistics, along with analyses on the proposed datasets. Table 3 shows general statistics of datasets in terms of post and TLDR length. As shown, the compression rate 4 is 8.7 and 12.5 in TLDR9+, and TLDRHQ datasets, respectively. This shows that authors generally tend to write much shorter TLDRs that highly shortens the post's text, which is expected due to the nature of TLDR summaries. Figure 4 demonstrates the number of TLDR pairs in TLDR9+ across different years. As observed, 83.65% of these TLDRs occur after 2013 which shows the popularity of this writing style among the Reddit users. We also see a similar trend for years after 2013, each of which constitutes a fixed amount (10%-12%) of the dataset. Table 4 demonstrates the detailed information including data size, sentence length and vocabulary statistics of TL-DRHQ dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 90, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 417, |
|
"end": 425, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 740, |
|
"end": 747, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Analysis", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "As mentioned earlier, we define the oracle sentence to be the one within the longer post that has the highest overlap with TLDR summary in terms of ROUGE-2 and ROUGE-L mean scores. The oracle sentence's relative position in post's text along with its importance is shown in Figure 5 (a) . We define the oracle importance score as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 286, |
|
"text": "Figure 5 (a)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Analysis", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "oracle importance = max RG 2+L (s i ) s i \u2208D RG 2+L", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Analysis", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where D is the set of all sentences within the post, and s i denotes the ith sentence. RG 2+L (.) is a function that takes in a post's sentence, and outputs the mean of its ROUGE-2 and ROUGE-L score with respect to TLDR summary. Intuitively, the oracle importance score can be framed as the attention score over the oracle sentences when the scoring function is ROUGE. Observing Figure 5 , while more of the oracle sentences occur in early parts of the post's text (< 0.10) with importance score of less than 0.30, it appears that the oracle sentences are spread out across the post's text overall. This observation is substantial, justifying the usability of this dataset for extractive summarization task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 379, |
|
"end": 387, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Analysis", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "To analyze the abstraction level of TLDRHQ dataset, we plot the percentage of novel n-grams within the TLDR summary (See et al., 2017) in Figure 5 (b), as well as the TLDR's n-gram abstractiveness (Gehrmann et al., 2019) in Figure 5 (c) over the all instances in TLDRHQ dataset. As indicated, there are quite a large proportion of novel n-gram words appeared in the TLDR summary as the heat extent is mostly concentrated in the upper half of the y-axis. These plots show the promising capability and challenges of this dataset to be used for abstractive summarization models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 134, |
|
"text": "(See et al., 2017)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 220, |
|
"text": "(Gehrmann et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 146, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 232, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Analysis", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We benchmark several extractive and abstractive summarization baselines over our two proposed datasets. BERTSUMEXT. (Liu and Lapata, 2019 ) Bert-SumExt model is the extractive variant of BERT-SUM which is the BERT Model fine-tuned on text summarization task. In this regard, BERT [CLS] tokens are appended to the start of each input sentence, and their associated representations are used to predict if the sentence should be included in the final summary or not. BERTSUMABS. (Lewis et al., 2020) BERTSUM-ABS is the abstractive model of BERTSUM, where a Transformers-based decoder is added to the BERT Encoder. BART. (Lewis et al., 2020) BART is a regressive autoencoder model that is pre-trained by first corrupting the text with an arbitrary noising function, and secondly, trying to reconstruct the original input text. BART is particularly effective when fine-tuned on text generation tasks such as summarization. As BART has both encoder and decoder pre-trained, it can be perceived as an extension to general BERT models in which only encoder is pre-trained.", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 137, |
|
"text": "(Liu and Lapata, 2019", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 285, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 496, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 617, |
|
"end": 637, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "TLDRHQ Model", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TLDR9+", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "RG-1(%) RG-2(%) RG-L(%) RG-1(%) RG-2(%) RG-L(%)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TLDR9+", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "BERTSUMEXT (Liu and Lapata, 2019) Table 5 : ROUGE (F1) results of the state-of-the-art summarization models on the test sets of the proposed TLDR summarization datasets (TLDR9+, and TLDRHQ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 33, |
|
"text": "(Liu and Lapata, 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 41, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "TLDR9+", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We randomly split our datasets to construct training, validation, and test sets. Specifically, for TLDR9+, we use 99-0.5-0.5 split which results in 9,139,935 (train), 43,753 (validation), and 43,749 (test) instances. To split TLDRHQ, we use 95-2.5-2.5 division yielding 1,590,132 (train), 40,481 (validation), and 40,486 (test) pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "To train the summarization models, we utilize Hug-gingFace's Transformers (Wolf et al., 2020) for BART, and the open implementation 5 of BERT-SUMEXT, BERTSUMABS. We use warm-up steps of 32K, and 20K for BART and BERTSUM variants, respectively. The AdamW optimizer (Loshchilov and Hutter, 2019) is used with learning rate of 3e \u2212 5, beta parameter of 0.98, and weight decay of 0.01 for BART model. For BERTSUM variants, we use the default Adam (Kingma and Ba, 2015) optimizer with learning rates of 2e \u2212 3 for the encoder, and 1e \u2212 2 for the decoder as suggested by the main paper (Liu and Lapata, 2019) . For all models, we use cross-entropy loss function. We train the models on 8 Nvidia Tesla V100 GPUs for 5 epochs with early stopping of the training when the validation loss does not decrease for 3 consecutive validation steps. The validation step is done every 25K training steps. To visualize and keep track of the learning process, we use Weight and Biases (Biewald, 2020) toolkit. Table 5 presents the performance of the state-of-theart summarization models on our proposed datasets in terms of ROUGE-1, ROUGE-2, and ROUGE-L scores. As indicated, BART outperforms all other models across all ROUGE variants in both datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 93, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 293, |
|
"text": "(Loshchilov and Hutter, 2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 602, |
|
"text": "(Liu and Lapata, 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 965, |
|
"end": 980, |
|
"text": "(Biewald, 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 990, |
|
"end": 997, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training and Hyper-parameters", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "5 https://github.com/nlpyang/PreSumm This is expected as both BART's encoder and decoder have been pre-trained on a large amount of unlabelled data, unlike BERTSUM variants that only have pre-trained encoders.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Comparing abstractive models with BERT-SUMEXT, we observe relatively large performance gap. This might be due to the fact that TLDRs in both TLDR9+ and TLDRHQ datasets are rather abstractive than extractive as also shown in Section 3.4. Yet with the existence of such a huge gap, the ORACLE-EXT (i.e., upper bound of an extractive summarizer) scores prove that more developed extractive summarizers can perform out-of-the-box and mitigate this gap. The performance gap on TLDR9+ brings various challenges to develop summarization models that better fit on the larger dataset that include noisy data (Kumar et al., 2020) . This noise might be handled via methods such as noise-aware training models (Namysl et al., 2020) , while enabling the models to benefit from the large-scale TLDR9+ dataset. We leave this part for future work. It has to be mentioned that automatic evaluation of summarization continues to be an issue and while this dataset does not solve that, instead can be used with any evaluation metric as they evolve.", |

"cite_spans": [ |

{ |

"start": 599, |

"end": 619, |

"text": "(Kumar et al., 2020)", |

"ref_id": "BIBREF18" |

}, |

{ |

"start": 698, |

"end": 719, |
|
"text": "(Namysl et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To gain insights into the qualities of summarization model, we analyze the outputs generated by the models. The diagrams demonstrating n-gram abstractiveness and percentage of novel n-grams, generated by BART and BERTSUMABS, are plotted in Figure 6 . As observed, BART model appears to have a similar trend to the ground truth TLDRs. On the other hand, BERTSUMABS model has increasing n-gram abstractiveness, and novel n-gram percentage with increasing n. It is also interesting that after 6-gram, BERTSUMEXT model reaches a Figure 6 : The n-gram abstractiveness and percentage of novel n-gram metrics across different n-grams on TLDRHQ's test set. As seen, BART generates more abstractive summaries than BERTSUMABS as it mitigates the gap between BERTSUMABS and ground truth summary.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 248, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 525, |
|
"end": 533, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "plateau when generating novel n-grams, but we see a drop after 3-grams for BART and the ground truth TLDRs. This shows that from 1-gram to 3-gram, there is an increasing number of novel words appearing in the ground-truth and BART, but after that, they both tend to copy n-grams rather than generating them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "To understand the limitation and qualities of current state-of-the-art summarization models, we conduct a qualitative analysis on several samples from TLDRHQ dataset, of which one is shown in Figure 7 . Analyzing this sample, we observe that BART generated a better summary in terms of faithfulness to the ground truth TLDR. On the other hand, while BERTSUMABS could identify the important region of the source document, it has produced a longer TLDR with additional information that is present in the source, but not in the ground truth summary. BERTSUMEXT model could have identified a source sentence which is partly in connection with the ground truth TLDR, but it leaves out the most important sentence as the oracle to be extracted. Considering the upper performance of extractive summarizers (i.e., ORACLE-EXT score in Table 5 ), we believe that there is a large room for improvement on this dataset. Investigations of more advanced models remains for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 201, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 827, |
|
"end": 834, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Let me start this off by saying I'm not sure if this is the right spot to ask , and matching is not really my forte. I have my nostril pierced , as well as my septum. I got them done earlier this year and I've been playing around with different jewelry .all my jewelry has been white gold / silver... until now (edit -I originally had a silver hoop in my nostril and it was constantly irritated so I read up on it and found that silver is not good for piercings so I only use 14k white gold currently ). I purchased a 14k solid rose gold nose hoop (20g). I'm curious if it would look weird wearing a rose gold nose hoop with a white gold seamless septum ring (16g) ?? or any white gold septum jewelry? I don't want to look like a fool who can't match her facial jewelry! BertSumAbs. would it look weird wearing a rose gold nose hoop with a white gold seamless septum ring (16g) ?? or any white gold septum jewelry ? I don't want to look like a fool who can't match her facial jewelry .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "would it look weird to wear a rose gold nosering with a white gold hoop septum ring?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BART.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Ground truth. would it look weird to wear a rose gold hoop in my nostril with a white gold hoop in my septum?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BART.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "BertSumExt.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BART.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "I purchased a 14k solid rose gold nose hoop (20g). Figure 7 : A sample from TLDRHQ test set along with the model generated summaries. Underlined text in source shows the important regions of the source for generating TLDR summary.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 59, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "BART.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we proposed two large-scale summarization datasets called TLDR9+, and TLDRHQ. The TLDR9+ dataset contains over 9 million Reddit post-TLDR instances. To distill a more fine-grained dataset out of TLDR9+, we sample high-quality instances with the help of human annotations to construct TLDRHQ. Our analyses over TLDR9+ and TLDRHQ datasets show their usability for performing both extractive and abstractive summarization tasks. We further establish extractive and abstractive baseline results using state-of-the-art summarization models on both datasets. We hope our datasets can pave the path for future studies in this direction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://www.reddit.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://files.pushshift.io/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Suppose a case where two annotators confirm (label 1), while the other two reject (label 0).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We warmly thank the anonymous reviewers as well as Tracy King for their helpful feedback and suggestions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Impact of dataset size on classification performance: An empirical evaluation in the medical domain", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Althnian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Alsaeed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Heyam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amani", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Al-Baity", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Samha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Applied Sciences", |
|
"volume": "11", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Althnian, D. AlSaeed, Heyam H. Al-Baity, Amani K. Samha, Alanoud Bin Dris, Najla Alza- kari, A. A. Elwafa, and H. Kurdi. 2021. Impact of dataset size on classification performance: An em- pirical evaluation in the medical domain. Applied Sciences, 11:796.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The pushshift reddit dataset", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baumgartner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Savvas", |
|
"middle": [], |
|
"last": "Zannettou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Keegan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Megan", |
|
"middle": [], |
|
"last": "Squire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Blackburn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ICWSM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Baumgartner, Savvas Zannettou, Brian Kee- gan, Megan Squire, and J. Blackburn. 2020. The pushshift reddit dataset. In ICWSM.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Communications Through Limited-Response Questioning*", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bennet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Alpert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Goldstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1954, |
|
"venue": "Public Opinion Quarterly", |
|
"volume": "18", |
|
"issue": "3", |
|
"pages": "303--308", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1086/266520" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. M. Bennet, R. Alpert, and A. C. Goldstein. 1954. Communications Through Limited-Response Ques- tioning*. Public Opinion Quarterly, 18(3):303-308.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Experiment tracking with weights and biases. Software available from wandb", |
|
"authors": [ |
|
{ |
|
"first": "Lukas", |
|
"middle": [], |
|
"last": "Biewald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lukas Biewald. 2020. Experiment tracking with weights and biases. Software available from wandb.com.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Tldr: Extreme summarization of scientific documents", |
|
"authors": [ |
|
{ |
|
"first": "Isabel", |
|
"middle": [], |
|
"last": "Cachola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Weld", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "FINDINGS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Isabel Cachola, Kyle Lo, Arman Cohan, and Daniel S. Weld. 2020. Tldr: Extreme summarization of scien- tific documents. In FINDINGS.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Better highlighting: Creating sub-sentence summary highlights", |
|
"authors": [ |
|
{ |
|
"first": "Sangwoo", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaiqiang", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Foroosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sangwoo Cho, Kaiqiang Song, Chen Li, Dong Yu, H. Foroosh, and Fei Liu. 2020. Better highlight- ing: Creating sub-sentence summary highlights. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A discourse-aware attention model for abstractive summarization of long documents", |
|
"authors": [ |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franck", |
|
"middle": [], |
|
"last": "Dernoncourt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soon", |
|
"middle": [], |
|
"last": "Doo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trung", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seokhwan", |
|
"middle": [], |
|
"last": "Bui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Goharian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arman Cohan, Franck Dernoncourt, Doo Soon Kim, Trung Bui, Seokhwan Kim, W. Chang, and Nazli Goharian. 2018. A discourse-aware attention model for abstractive summarization of long documents. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Banditsum: Extractive summarization as a contextual bandit", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yikang", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Crawford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Hoof", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Cheung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Dong, Yikang Shen, E. Crawford, H. V. Hoof, and J. Cheung. 2018. Banditsum: Extractive summariza- tion as a contextual bandit. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Bottom-up abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Sebastian Gehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Gehrmann, Y. Deng, and Alexander M. Rush. 2018. Bottom-up abstractive summarization. EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Generating abstractive summaries with finetuned language models", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Gehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Ziegler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "INLG", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Gehrmann, Zachary M. Ziegler, and Alexan- der M. Rush. 2019. Generating abstractive sum- maries with finetuned language models. In INLG.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "English gigaword. Linguistic Data Consortium", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Graff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuaki", |
|
"middle": [], |
|
"last": "Maeda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Graff, Junbo Kong, Ke Chen, and Kazuaki Maeda. 2003. English gigaword. Linguistic Data Consortium, Philadelphia, 4(1):34.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Countering the effects of lead bias in news summarization via multi-stage training and auxiliary losses", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Grenander", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"C K" |
|
], |
|
"last": "Cheung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Annie", |
|
"middle": [], |
|
"last": "Louis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Grenander, Yue Dong, J. C. K. Cheung, and An- nie Louis. 2019. Countering the effects of lead bias in news summarization via multi-stage training and auxiliary losses. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Newsroom: A dataset of 1.3 million summaries with diverse extractive strategies", |
|
"authors": [ |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Grusky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Naaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Max Grusky, M. Naaman, and Yoav Artzi. 2018. News- room: A dataset of 1.3 million summaries with di- verse extractive strategies. In NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Long shortterm memory", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural Computation", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Hochreiter and J. Schmidhuber. 1997. Long short- term memory. Neural Computation, 9:1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Content selection in deep learning models of summarization", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Kedzie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Kedzie, K. McKeown, and Hal Daum\u00e9. 2018. Content selection in deep learning models of sum- marization. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Abstractive summarization of reddit posts with multi-level memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Byeongchang", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyunwoo", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gunhee", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Byeongchang Kim, Hyunwoo Kim, and Gunhee Kim. 2019. Abstractive summarization of reddit posts with multi-level memory networks. In NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. CoRR, abs/1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Billsum: A corpus for automatic summarization of us legislation", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kornilova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Eidelman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Kornilova and Vladimir Eidelman. 2019. Billsum: A corpus for automatic summarization of us legisla- tion. ArXiv, abs/1910.00523.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Noisy text data: Achilles' heel of bert", |
|
"authors": [ |
|
{ |
|
"first": "Ankit", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Makhija", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anuj", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankit Kumar, Piyush Makhija, and Anuj Gupta. 2020. Noisy text data: Achilles' heel of bert. In WNUT.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Learning to fuse sentences with transformers for summarization", |
|
"authors": [ |
|
{ |
|
"first": "Logan", |
|
"middle": [], |
|
"last": "Lebanoff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franck", |
|
"middle": [], |
|
"last": "Dernoncourt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soon", |
|
"middle": [], |
|
"last": "Doo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidan", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Logan Lebanoff, Franck Dernoncourt, Doo Soon Kim, Lidan Wang, W. Chang, and Fei Liu. 2020. Learn- ing to fuse sentences with transformers for summa- rization. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, A. Mohamed, Omer Levy, V. Stoy- anov, and Luke Zettlemoyer. 2020. Bart: Denoising sequence-to-sequence pre-training for natural lan- guage generation, translation, and comprehension. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Estimating user location in social media with stacked denoising auto-encoders", |
|
"authors": [ |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Inkpen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "VS@HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ji Liu and D. Inkpen. 2015. Estimating user location in social media with stacked denoising auto-encoders. In VS@HLT-NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Text summarization with pretrained encoders", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "EMNLP/IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu and Mirella Lapata. 2019. Text summariza- tion with pretrained encoders. In EMNLP/IJCNLP.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Decoupled weight decay regularization", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Loshchilov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Hutter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I. Loshchilov and F. Hutter. 2019. Decoupled weight decay regularization. In ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Ontology-aware clinical abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Sean", |
|
"middle": [], |
|
"last": "Macavaney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sajad", |
|
"middle": [], |
|
"last": "Sotudeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Goharian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ish", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Talati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ross", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Filice", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sean MacAvaney, Sajad Sotudeh, Arman Cohan, Nazli Goharian, Ish A. Talati, and Ross W. Filice. 2019. Ontology-aware clinical abstractive summarization. Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Infor- mation Retrieval.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Summarunner: A recurrent neural network based sequence model for extractive summarization of documents", |
|
"authors": [ |
|
{ |
|
"first": "Ramesh", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feifei", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramesh Nallapati, Feifei Zhai, and Bowen Zhou. 2017. Summarunner: A recurrent neural network based se- quence model for extractive summarization of docu- ments. In AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Nat: Noise-aware training for robust neural sequence labeling", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Namysl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sven", |
|
"middle": [], |
|
"last": "Behnke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kohler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Namysl, Sven Behnke, and J. Kohler. 2020. Nat: Noise-aware training for robust neural se- quence labeling. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Don't give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization", |
|
"authors": [ |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shay", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Narayan, Shay B. Cohen, and Mirella Lapata. 2018. Don't give me the details, just the summary! topic-aware convolutional neural networks for ex- treme summarization. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Stepwise extractive summarization and planning with structured transformers", |
|
"authors": [ |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Maynez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakub", |
|
"middle": [], |
|
"last": "Ad\u00e1mek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniele", |
|
"middle": [], |
|
"last": "Pighin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Blavz", |
|
"middle": [], |
|
"last": "Bratanivc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Mc-Donald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Narayan, Joshua Maynez, Jakub Ad\u00e1mek, Daniele Pighin, Blavz Bratanivc, and Ryan T. Mc- Donald. 2020. Stepwise extractive summariza- tion and planning with structured transformers. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "A neural attention model for sentence summarization", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harvard", |
|
"middle": [], |
|
"last": "Seas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander M. Rush, Harvard Seas, S. Chopra, and J. Weston. 2015. A neural attention model for sen- tence summarization.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Get to the point: Summarization with pointergenerator networks", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. See, Peter J. Liu, and Christopher D. Manning. 2017. Get to the point: Summarization with pointer- generator networks. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Bigpatent: A large-scale dataset for abstractive and coherent summarization", |
|
"authors": [ |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eva Sharma, Chen Li, and L. Wang. 2019. Bigpatent: A large-scale dataset for abstractive and coherent summarization. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "On generating extended summaries of long documents", |
|
"authors": [ |
|
{ |
|
"first": "Sajad", |
|
"middle": [], |
|
"last": "Sotudeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Goharian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "The AAAI-21 Workshop on Scientific Document Understanding (SDU)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sajad Sotudeh, Arman Cohan, and Nazli Goharian. 2021a. On generating extended summaries of long documents. The AAAI-21 Workshop on Scientific Document Understanding (SDU).", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "On generating extended summaries of long documents", |
|
"authors": [ |
|
{ |
|
"first": "Sajad", |
|
"middle": [], |
|
"last": "Sotudeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Goharian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "SDU@AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sajad Sotudeh, Arman Cohan, and Nazli Goharian. 2021b. On generating extended summaries of long documents. SDU@AAAI, abs/2012.14136.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Attend to medical ontologies: Content selection for clinical abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Sajad", |
|
"middle": [], |
|
"last": "Sotudeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Goharian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Filice", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sajad Sotudeh, Nazli Goharian, and R. Filice. 2020a. Attend to medical ontologies: Content selection for clinical abstractive summarization. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Guir at semeval-2020 task 12: Domain-tuned contextualized models for offensive language detection", |
|
"authors": [ |
|
{ |
|
"first": "Sajad", |
|
"middle": [], |
|
"last": "Sotudeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao-Ren", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sean", |
|
"middle": [], |
|
"last": "MacAvaney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Goharian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ophir", |
|
"middle": [], |
|
"last": "Frieder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sajad Sotudeh, Tong Xiang, Hao-Ren Yao, Sean MacA- vaney, Eugene Yang, Nazli Goharian, and Ophir Frieder. 2020b. Guir at semeval-2020 task 12: Domain-tuned contextualized models for offensive language detection. SemEval2020.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Tl;dr: Mining reddit to learn automatic summarization", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "V\u00f6lske", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Potthast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shahbaz", |
|
"middle": [], |
|
"last": "Syed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benno", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael V\u00f6lske, Martin Potthast, Shahbaz Syed, and Benno Stein. 2017. Tl;dr: Mining reddit to learn automatic summarization. In NFiS@EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "von Platen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Plu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Canwen", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teven", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Scao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariama", |
|
"middle": [], |
|
"last": "Drame", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "Lhoest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2020. Transformers: State-of-the-art natural language pro- cessing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "An overview of overfitting and its solutions", |
|
"authors": [ |
|
{ |
|
"first": "Xue", |
|
"middle": [], |
|
"last": "Ying", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Journal of Physics: Conference Series", |
|
"volume": "1168", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1088/1742-6596/1168/2/022022" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xue Ying. 2019. An overview of overfitting and its solutions. Journal of Physics: Conference Series, 1168:022022.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Pegasus: Pre-training with extracted gap-sentences for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Jingqing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yao", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Saleh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Pe- ter J. Liu. 2019. Pegasus: Pre-training with ex- tracted gap-sentences for abstractive summarization. In ICML.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Make lead bias in your favor: Zero-shot abstractive news summarization. arXiv: Computation and Language", |
|
"authors": [ |
|
{ |
|
"first": "Chenguang", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziyi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Gmyr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuedong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenguang Zhu, Ziyi Yang, R. Gmyr, Michael Zeng, and Xuedong Huang. 2019. Make lead bias in your favor: Zero-shot abstractive news summarization. arXiv: Computation and Language.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "The proportion of TLDRs over entire posts (submissions and comments) submitted per year (Figures (c) and (d)). At the time of writing this paper, submissions dumps are partly uploaded for 2021 (until 2021-06), while there is no comments dumps uploaded for 2021.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "S score inter-rater agreement for annotation without context (left), and annotation with context (right)", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"text": "Heatmaps of TLDRHQ showing (a) the oracle sentence's importance to its relative position; (b) percentage of novel n-grams; and (c) n-gram abstractiveness. The heat extent shows the number of the instances within the specific bin.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Overview of extreme summarization datasets across different social and non-social domains with number of instances.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Average words length and number of sentences per instance along with the compression ratio in our proposed datasets.", |
|
"content": "<table/>" |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Detailed statistics of TLDRHQ dataset", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |