|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:10:48.610025Z" |
|
}, |
|
"title": "Text Simplification by Tagging", |
|
"authors": [ |
|
{ |
|
"first": "Kostiantyn", |
|
"middle": [], |
|
"last": "Omelianchuk", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Vipul", |
|
"middle": [], |
|
"last": "Raheja", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Oleksandr", |
|
"middle": [], |
|
"last": "Skurzhanskyi", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Edit-based approaches have recently shown promising results on multiple monolingual sequence transduction tasks. In contrast to conventional sequence-to-sequence (Seq2Seq) models, which learn to generate text from scratch as they are trained on parallel corpora, these methods have proven to be much more effective since they are able to learn to make fast and accurate transformations while leveraging powerful pre-trained language models. Inspired by these ideas, we present TST, a simple and efficient Text Simplification system based on sequence Tagging, leveraging pre-trained Transformer-based encoders. Our system makes simplistic data augmentations and tweaks in training and inference on a preexisting system, which makes it less reliant on large amounts of parallel training data, provides more control over the outputs and enables faster inference speeds. Our best model achieves near state-of-the-art performance on benchmark test datasets for the task. Since it is fully non-autoregressive, it achieves faster inference speeds by over 11 times than the current state-of-the-art text simplification system.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Edit-based approaches have recently shown promising results on multiple monolingual sequence transduction tasks. In contrast to conventional sequence-to-sequence (Seq2Seq) models, which learn to generate text from scratch as they are trained on parallel corpora, these methods have proven to be much more effective since they are able to learn to make fast and accurate transformations while leveraging powerful pre-trained language models. Inspired by these ideas, we present TST, a simple and efficient Text Simplification system based on sequence Tagging, leveraging pre-trained Transformer-based encoders. Our system makes simplistic data augmentations and tweaks in training and inference on a preexisting system, which makes it less reliant on large amounts of parallel training data, provides more control over the outputs and enables faster inference speeds. Our best model achieves near state-of-the-art performance on benchmark test datasets for the task. Since it is fully non-autoregressive, it achieves faster inference speeds by over 11 times than the current state-of-the-art text simplification system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Text Simplification is the task of rewriting text into a form that is easier to read and understand while preserving its underlying meaning and information. It has been shown to be valuable in providing assistance in terms of readability and understandability to children (Belder and Moens, 2010; Kajiwara et al., 2013) , people with language disabilities like aphasia (Carroll et al., 1998 (Carroll et al., , 1999 Devlin and Unthank, 2006) , dyslexia (Rello et al., 2013a,b) , or autism (Evans et al., 2014) ; non-native English speakers (Petersen and Ostendorf, 2007; Paetzold, 2015; Paetzold and Specia, 2016a,b; Pellow and Eskenazi, 2014) , and people with low literacy skills or reading ages (Max, 2006; Gasperin et al., 2009; Watanabe et al., 2009) . Moreover, it has also been successfully leveraged as a pre-processing step to improve the performance of various NLP tasks such as parsing (Chandrasekar et al., 1996) , summarization (Beigman Klebanov et al., 2004; Silveira and Branco, 2012) , semantic role labeling (Vickrey and Koller, 2008; Woodsend and Lapata, 2017) and machine translation (Gerber and Hovy, 1998; \u0160tajner and Popovic, 2016; Hasler et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 272, |
|
"end": 296, |
|
"text": "(Belder and Moens, 2010;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 297, |
|
"end": 319, |
|
"text": "Kajiwara et al., 2013)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 390, |
|
"text": "(Carroll et al., 1998", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 414, |
|
"text": "(Carroll et al., , 1999", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 440, |
|
"text": "Devlin and Unthank, 2006)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 475, |
|
"text": "(Rello et al., 2013a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 508, |
|
"text": "(Evans et al., 2014)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 539, |
|
"end": 569, |
|
"text": "(Petersen and Ostendorf, 2007;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 570, |
|
"end": 585, |
|
"text": "Paetzold, 2015;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 586, |
|
"end": 615, |
|
"text": "Paetzold and Specia, 2016a,b;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 616, |
|
"end": 642, |
|
"text": "Pellow and Eskenazi, 2014)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 697, |
|
"end": 708, |
|
"text": "(Max, 2006;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 709, |
|
"end": 731, |
|
"text": "Gasperin et al., 2009;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 732, |
|
"end": 754, |
|
"text": "Watanabe et al., 2009)", |
|
"ref_id": "BIBREF69" |
|
}, |
|
{ |
|
"start": 896, |
|
"end": 923, |
|
"text": "(Chandrasekar et al., 1996)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 940, |
|
"end": 971, |
|
"text": "(Beigman Klebanov et al., 2004;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 972, |
|
"end": 998, |
|
"text": "Silveira and Branco, 2012)", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 1024, |
|
"end": 1050, |
|
"text": "(Vickrey and Koller, 2008;", |
|
"ref_id": "BIBREF66" |
|
}, |
|
{ |
|
"start": 1051, |
|
"end": 1077, |
|
"text": "Woodsend and Lapata, 2017)", |
|
"ref_id": "BIBREF74" |
|
}, |
|
{ |
|
"start": 1102, |
|
"end": 1125, |
|
"text": "(Gerber and Hovy, 1998;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1126, |
|
"end": 1152, |
|
"text": "\u0160tajner and Popovic, 2016;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1153, |
|
"end": 1173, |
|
"text": "Hasler et al., 2017)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Evolving from the approaches ranging from building hand-crafted rules (Chandrasekar et al., 1996; Siddharthan, 2006) to syntactic and lexical simplification via synonyms and paraphrases (Siddharthan, 2014; Kaji et al., 2002; Horn et al., 2014; Glava\u0161 and\u0160tajner, 2015) , the task has gained popularity as a monolingual Machine Translation (MT) problem, where the system learns to \"translate\" a given complex sentence to its simplified form. Initially, Statistical phrase-based (SMT) and Syntactic-based Machine Translation (SBMT) techniques (Zhu et al., 2010; Specia, 2010; Coster and Kauchak, 2011; Wubben et al., 2012; Narayan and Gardent, 2014; \u0160tajner et al., 2015; Xu et al., 2016a) were successfully applied as a way to learn simplification rewrites implicitly from complexsimple sentence pairs, often in combination with hand-crafted rules or features. More recently, several Neural Machine Translation-based (NMT) systems have been developed with promising results (Sutskever et al., 2014; Cho et al., 2014; Bahdanau et al., 2015) , and their successful application to text simplification, either in combination with SMT or other data-driven approaches Zhao et al., 2018b) ; or strictly as neural models (Wang et al., 2016; Nisioi et al., 2017; Zhang and Lapata, 2017; \u0160tajner and Nisioi, 2018; Guo et al., 2018; Vu et al., 2018; Li et al., 2018; Kriz et al., 2019; Surya et al., 2019; Zhao et al., 2020a) , has emerged as the state-of-the-art.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 97, |
|
"text": "(Chandrasekar et al., 1996;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 98, |
|
"end": 116, |
|
"text": "Siddharthan, 2006)", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 205, |
|
"text": "(Siddharthan, 2014;", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 224, |
|
"text": "Kaji et al., 2002;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 243, |
|
"text": "Horn et al., 2014;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 268, |
|
"text": "Glava\u0161 and\u0160tajner, 2015)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 559, |
|
"text": "(Zhu et al., 2010;", |
|
"ref_id": "BIBREF85" |
|
}, |
|
{ |
|
"start": 560, |
|
"end": 573, |
|
"text": "Specia, 2010;", |
|
"ref_id": "BIBREF57" |
|
}, |
|
{ |
|
"start": 574, |
|
"end": 599, |
|
"text": "Coster and Kauchak, 2011;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 600, |
|
"end": 620, |
|
"text": "Wubben et al., 2012;", |
|
"ref_id": "BIBREF75" |
|
}, |
|
{ |
|
"start": 621, |
|
"end": 647, |
|
"text": "Narayan and Gardent, 2014;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 648, |
|
"end": 669, |
|
"text": "\u0160tajner et al., 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 670, |
|
"end": 687, |
|
"text": "Xu et al., 2016a)", |
|
"ref_id": "BIBREF77" |
|
}, |
|
{ |
|
"start": 973, |
|
"end": 997, |
|
"text": "(Sutskever et al., 2014;", |
|
"ref_id": "BIBREF63" |
|
}, |
|
{ |
|
"start": 998, |
|
"end": 1015, |
|
"text": "Cho et al., 2014;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1016, |
|
"end": 1038, |
|
"text": "Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1161, |
|
"end": 1180, |
|
"text": "Zhao et al., 2018b)", |
|
"ref_id": "BIBREF82" |
|
}, |
|
{ |
|
"start": 1212, |
|
"end": 1231, |
|
"text": "(Wang et al., 2016;", |
|
"ref_id": "BIBREF68" |
|
}, |
|
{ |
|
"start": 1232, |
|
"end": 1252, |
|
"text": "Nisioi et al., 2017;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 1253, |
|
"end": 1276, |
|
"text": "Zhang and Lapata, 2017;", |
|
"ref_id": "BIBREF79" |
|
}, |
|
{ |
|
"start": 1277, |
|
"end": 1302, |
|
"text": "\u0160tajner and Nisioi, 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1303, |
|
"end": 1320, |
|
"text": "Guo et al., 2018;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1321, |
|
"end": 1337, |
|
"text": "Vu et al., 2018;", |
|
"ref_id": "BIBREF67" |
|
}, |
|
{ |
|
"start": 1338, |
|
"end": 1354, |
|
"text": "Li et al., 2018;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1355, |
|
"end": 1373, |
|
"text": "Kriz et al., 2019;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1374, |
|
"end": 1393, |
|
"text": "Surya et al., 2019;", |
|
"ref_id": "BIBREF62" |
|
}, |
|
{ |
|
"start": 1394, |
|
"end": 1413, |
|
"text": "Zhao et al., 2020a)", |
|
"ref_id": "BIBREF83" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Human editors perform several rewriting transformations in order to simplify a sentence, such as lexical paraphrasing, changing the syntactic structure, or removing superfluous information from the sentence (Petersen and Ostendorf, 2007; Mallinson et al., 2020) . Therefore, even though NMT-based sequence-to-sequence (Seq2Seq) approaches offer a generic framework for modeling almost any kind of sequence transduction, target texts in these approaches are typically generated from scratch -a process which can be unnecessary for monolingual editing tasks such as text simplification, owing to these aforementioned transformations. Moreover, these approaches have a few shortcomings that make them inconvenient for real-world deployment. First, they give limited insight into the simplification operations and provide little control or adaptability to different aspects of simplification (e.g., lexical vs. syntactical simplification). This inhibits interpretability and explainability, which is crucial for real-world settings. Second, they are not sample-efficient and require a large number of complex-simple aligned sentence pairs for training, which requires considerable human effort to obtain. Third, these models typically employ an autoregressive decoder, i.e., output texts are generated in a sequential, non-parallel fashion, and hence, are generally characterized by slow inference speeds.", |
|
"cite_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 237, |
|
"text": "(Petersen and Ostendorf, 2007;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 238, |
|
"end": 261, |
|
"text": "Mallinson et al., 2020)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Based on the aforementioned observations and issues, text-editing approaches have recently regained significant interest (Gu et al., 2019; Dong et al., 2019; Awasthi et al., 2019; Malmi et al., 2019; Omelianchuk et al., 2020; Mallinson et al., 2020) . Typically, the set of edit operations in such tasks is fixed and predefined ahead of time, which on one hand limits the flexibility of the model to reconstruct arbitrary output texts from their inputs, but on the other, leads to higher sample-efficiency as the limited set of allowed operations significantly reduces the search space (Mallinson et al., 2020) . This pattern is especially true for monolingual settings where input and output texts have relatively high degrees of overlap. In such cases, a natural approach is to cast the task of conditional text generation into a text-editing task, where the model learns to reconstruct target texts by applying a set of edit operations to the inputs. We leverage this insight in our work, and simplify the task from sequence generation or editing, going a step further, to formulate it as a sequence tagging task. In addition to being sample efficient, thanks to the separation of various edit operations in the form of tags, the system has better interpretability and explainability. Finally, since for sequence tagging we don't need to predict tokens one-by-one as in autoregressive decoders, the inference is naturally parallelizable and therefore runs many times faster.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 138, |
|
"text": "(Gu et al., 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 157, |
|
"text": "Dong et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 179, |
|
"text": "Awasthi et al., 2019;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 199, |
|
"text": "Malmi et al., 2019;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 225, |
|
"text": "Omelianchuk et al., 2020;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 249, |
|
"text": "Mallinson et al., 2020)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 586, |
|
"end": 610, |
|
"text": "(Mallinson et al., 2020)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Following from the success of the aforementioned monolingual edit-tag based systems, we propose to leverage the current state-of-the-art model for Grammatical Error Correction by Omelianchuk et al. (2020) (GECToR) and adapt it to the task of Text Simplification. In summary, we make the following contributions:", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 213, |
|
"text": "Omelianchuk et al. (2020) (GECToR)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We develop a Text Simplification system by adapting the GECToR model to Text Simplification, leveraging Transformer-based encoders trained on large amounts of humanannotated and synthetic data. 1 Empirical results demonstrate that our system achieves near state-of-the-art performance on benchmark test datasets in terms of readability and simplification metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose crucial data augmentations and tweaks in training and inference and show their significant impact on the task: enabling the model to learn to edit the sentences more effectively, rather than relying heavily on copying the source sentences, leading to a higher quality of simplifications.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Since our model is a non-autoregressive sequence tagging model, it achieves over 11 times speedup in inference time, compared to the state-of-the-art for Text Simplification. Figure 1 : Text Simplification by Tagging (TST): A given sentence undergoes multiple iterations of tagand-edit transformations, where, in each iteration, it is tagged using custom token-level edit-tags, and the sequence of tags is converted back to text by applying those edits, iteratively making simplifying edits. labels sequentially. Our model for sentence simplification does not rely on external simplification rules nor alignment tools. Ribeiro et al. (2018) proposed an approach applied only to character deletion and insertion and was based on simple patterns. LaserTagger (Malmi et al., 2019 ) combines a BERT encoder with an autoregressive Transformer decoder to similarly predict the aforementioned three main edit operations for several text editing tasks. In contrast, in our system, the decoder is a softmax layer. Similarly, EditNTS (Dong et al., 2019) and PIE (Awasthi et al., 2019) predict edit labels, developed specifically for text simplification and GEC, respectively. While EditNTS employs an autoregressive encoder-decoder based neural programmer-interpreter model, PIE differs from our work because of our custom edit transformations and incorporation of a pre-trained Transformer encoder for sequence tagging. Levenshtein Transformer (Gu et al., 2019) , an autoregressive model that performs text editing by executing a sequence of deletion and insertion actions, is another recent work along similar lines. More recently, Mallinson et al. (2020) proposed Felix -a text-editing-based system for multiple generation tasks, splitting the text-editing task into two subtasks: tagging and insertion. Their tagging model employs a Pointer mechanism, while the insertion model is based on a Masked Language Model. 
2020, who leveraged similar frameworks for different text editing problems such as GEC, Sentence Fusion, and Abstractive Summarization, we formulate the task of Text Simplification as a tagging problem. Specifically, our system is based on GECToR (Omelianchuk et al., 2020) , an iterative sequencetagging system that works by predicting token-level edit operations, originally developed for Grammatical Error Correction (GEC). We adapt the GECToR framework for the task of Text Simplification, with minimal modifications to the original architecture. Our system consists of three main parts: (a) defining the custom transformations (token-level edittags), (b) performing iterative sequence tagging to convert target sequences to tag sequences, (c) fine-tuning of pre-trained Transformers to predict the tag sequences. Each of these components are described below.", |
|
"cite_spans": [ |
|
{ |
|
"start": 621, |
|
"end": 642, |
|
"text": "Ribeiro et al. (2018)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 759, |
|
"end": 778, |
|
"text": "(Malmi et al., 2019", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 1026, |
|
"end": 1045, |
|
"text": "(Dong et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1054, |
|
"end": 1076, |
|
"text": "(Awasthi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1437, |
|
"end": 1454, |
|
"text": "(Gu et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1626, |
|
"end": 1649, |
|
"text": "Mallinson et al. (2020)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 2158, |
|
"end": 2184, |
|
"text": "(Omelianchuk et al., 2020)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 185, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to formulate the task as a tagging problem, building on the aforementioned edit-tagging-based approaches, we use custom token-level edit operations (also referred to as edit-tags or transformations) to perform text simplification. Formally, given a sentence x:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Edit Transformations", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "[x 1 , x 2 , . . . , x N ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Edit Transformations", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": ", and its simplified form y: [y 1 , y 2 , . . . , y M ] as the target sentence, we aim to predict an edit tag t i \u2208 \u03c4 (\u03c4 denoting the edit-tag vocabulary) for each token x i in x, generating a sequence of edit-tags of the same length N as the input sequence x, such that t i (x i ): applying the edit operation represented by the edit-tag t i to the input token x i at each position i, reconstructs the target sequence y, even though M \u2264 N .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Edit Transformations", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We reuse the edit transformations in GECToR, which were developed for GEC. We chose to do so because we found a significantly high overlap of 92.64% in the tag distributions between the GEC and Text Simplification domains. This was done by building the edit-tag vocabularies independently on both (GEC and Text Simplification) datasets and comparing the tag distributions represented by the two vocabularies. This was not surprising since these edit-tags have been obtained from huge amounts of synthetic GEC data, they are expected to have good coverage with many standard monolingual text editing problems. Additionally, using the same edit-tags is a necessary pre-requisite to leverage GEC initialization in the model (Section 4.3), which we later show to be quite impactful for our text simplification system (Section 6.1). Consequently, the edit space \u03c4 is of size 5000, out of which 4971 are basic edit-tags and 29 are token-independent GEC-specific edit-tags (such as $TRANSFORM_VERB_VB_VBZ, which converts a verb in its base form to its third person singular present tense form). Further, the aforementioned 4971 basic edit-tags are made up of token-independent KEEP and DELETE tags (which simply keep or delete the given word(s) on which they are applied), 1167 token-dependent APPEND tags (such as $APPEND_just, which appends the word \"just\" to the given word) and 3802 token-dependent REPLACE tags (such as $REPLACE_really, which replaces the given word with the word \"really\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Edit Transformations", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As described in Section 3.1, we predict the edit-tags t i for each input token x i in the source sequence x. These predicted tag-encoded transformations are then applied to the source sentence to get the simplified sentence. Since some simplification operations in a sentence may depend on others, applying the sequence tagger only once may not be enough to fully generate the simplified form of a given sentence. Accordingly, we use the iterative correction approach from Awasthi et al. 2019and Omelianchuk et al. (2020) , and use the sequence tagger to tag the now modified sequence, and apply the corresponding transformations on the new edit-tags, which changes the sentence further. We repeat this process for a fixed number of iterations, which can be adjusted to trade off qualitative performance for improved speed. In our framework, we experimented between 1-5 iterations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 496, |
|
"end": 521, |
|
"text": "Omelianchuk et al. (2020)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Iterative Sequence Tagging", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We use the GECToR sequence tagging model with a pre-trained RoBERTa BASE Transformer (Liu et al., 2019) as the encoder, stacked with two concurrent feed-forward layers, followed by corresponding Softmax layers. Owing to our choice of encoder, we use Byte-Pair Encoding (BPE) (Sennrich et al., 2016) as our tokenization technique.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 103, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tagging Model", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "As shown in Fig. 1 , these feed-forward layers are responsible for detecting and classifying edits, respectively. For every position in the input sequence, the edit-detection layer predicts the probability an edit exists, whereas the edit-classification layer predicts the type of edit-tag. The edit-tag sequence generated as the output of the edit-classification layer is gated by the output of the edit-detection layer. i.e. if the output of the edit-detection layer is below the minimum edit probability threshold (described in Section 4.6) at any position in the predicted sequence, we do not make any edits.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 18, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tagging Model", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We use WikiSmall and WikiLarge, two benchmark datasets for the text simplification task 2 , for our experiments. These datasets were constructed from automatically-aligned complex-simple sentence pairs from English Wikipedia (EW) and Simple English Wikipedia (SEW). WikiSmall (Zhu et al., 2010) contains one reference simplification per sentence. We use the standardized split of this dataset released by Zhang and Lapata (2017) , with 88k instances for training, 205 for validation and the same original 100 instances for testing. Wik-iLarge is a larger set of similarly automaticallyaligned complex-simple sentence pairs, compiled from previous extractions of EW-SEW and Wik-iSmall (Zhu et al., 2010; Woodsend and Lapata, 2011; Kauchak, 2013) . Similar to WikiSmall, we use the training set for this dataset provided by Zhang and Lapata (2017) consisting of 296k sentence pairs. For simplicity, we refer to this training data (WikiSmall + WikiLarge) as WikiAll.", |
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 294, |
|
"text": "(Zhu et al., 2010)", |
|
"ref_id": "BIBREF85" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 428, |
|
"text": "Zhang and Lapata (2017)", |
|
"ref_id": "BIBREF79" |
|
}, |
|
{ |
|
"start": 684, |
|
"end": 702, |
|
"text": "(Zhu et al., 2010;", |
|
"ref_id": "BIBREF85" |
|
}, |
|
{ |
|
"start": 703, |
|
"end": 729, |
|
"text": "Woodsend and Lapata, 2011;", |
|
"ref_id": "BIBREF73" |
|
}, |
|
{ |
|
"start": 730, |
|
"end": 744, |
|
"text": "Kauchak, 2013)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 822, |
|
"end": 845, |
|
"text": "Zhang and Lapata (2017)", |
|
"ref_id": "BIBREF79" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Sources", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For validation and test sets, we use the Turkcorpus (Xu et al., 2016a) and ASSET (Alva-Manchego et al., 2020) datasets, which were both created from WikiLarge using the same 2000 validation and 359 test source sentences, where each complex sentence consists of multiple crowd-sourced reference simplifications. Specifically, Turkcorpus contains 8 reference simplifications, and ASSET contains 10 references per source sentence. Table 1 provides other statistics on these datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 70, |
|
"text": "(Xu et al., 2016a)", |
|
"ref_id": "BIBREF77" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 428, |
|
"end": 435, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Sources", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "WikiAll data contains special tokens to represent parentheses (symbolized by -LRB-and -RRB-) from prior tokenizations. We heuristically decide to remove these tokens (and any tokens between them) from both source and target sentences. Doing this led to consistent improvements in all our experiments, described further in Section 6.2. Additionally, for tokenization, we use the HuggingFace Tokenizers 3 Python library to tokenize the whole sentence (as opposed to the approach in GECToR which tokenized each word in the sentence separately). This change led faster and more accurate tokenization as the one originally used in RoBERTa.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data pre-processing", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For our experiments, we use two versions of the tagging model described in Section 3.3. The first version is a pre-trained RoBERTa BASE encoder with randomly initialized feed-forward layers. We refer to this model as TST-BASE (Text Simplification by Tagging -Baseline). The second version of the model is a TST-BASE model fine-tuned on the Grammatical Error Correction (GEC) task: henceforth denoted as TST-GEC. 4", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-Training", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We hypothesize that our text simplification models can benefit from an increase of the training data, and experimentally confirm this by training and evaluating our models with additional training data. We generate synthetic training data from the source sentences of WikiAll. We used two approaches to do so: back-translation and ensemble distillation, described below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Augmentation", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We use the Transformer-based NMT models trained by Tiedemann and Thottingal (2020) to generate the back-translated versions of the target side of the parallel WikiAll data. These models were trained on OPUS data 5 using Marian-NMT 6 and released as part of the HuggingFace Transformers Library (Wolf et al., 2019) . All models are Transformer encoder-decoders with 6 layers in each component. Specifically, we used the bilingual EN-FR, FR-EN, EN-DE and DE-EN models 7 to translate WikiAll data from (a) English to French, and back to English, and (b) English to German and back to English. Doing so tripled the amount of WikiAll data available for training (Table 1) . The backtranslated WikiAll data is henceforth collectively referred to as WikiBT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 82, |
|
"text": "Tiedemann and Thottingal (2020)", |
|
"ref_id": "BIBREF65" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 313, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 657, |
|
"end": 666, |
|
"text": "(Table 1)", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Back-Translation", |
|
"sec_num": "4.4.1" |
|
}, |
|
{ |
|
"text": "We leverage knowledge distillation on ensemble teacher models (Freitag et al., 2017) to augment our training data. We first create an ensemble teacher model by training models on WikiAll and WikiBT data. Specifically, for building the teacher ensemble, we first train the following constituent TST models:", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 84, |
|
"text": "(Freitag et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Distillation", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "1. TST: Trained on WikiAll 2. TST-GEC: Trained on WikiAll", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble Distillation", |
|
"sec_num": "4.4.2" |
|
}, |
|
{ |
|
"text": "The predictions of the ensemble are computed by taking the argmax of the averaged class-wise probabilities of the constituent models at every token. We get the predictions from this ensemble consisting of the aforementioned three constituent models on WikiAll data. In this way, we produce new references for the training data which can be used by our final model (referred to as the student network) to simulate the teacher network ensemble. We then combine this ensemble-generated training data (hereby referred to as WikiEns) together with the original WikiAll data, doubling the amount of training data. Our final model (the student network, denoted henceforth as TST-FINAL) is then trained on this combined WikiEns + WikiAll dataset. It is worth noting that the student and the constituent teacher models have exactly the same architecture.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TST: Trained on WikiAll + WikiBT", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "We train our models with AllenNLP and Transformers. Our baseline (TST-BASE) mostly follows the settings in Omelianchuk et al. (2020) . We train the model for 50 epochs, where we freeze the encoder weights during the first two epochs of training. We use Adam optimizer (Kingma and Ba, 2015), where the learning rate starts from 1e-5 and reduces by a factor of 0.1 when the validation loss has stopped improving for 10 epochs. We perform early stopping after 3 epochs, based on the performance on the validation set. Other training hyper-parameters are listed in Appendix A.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 132, |
|
"text": "Omelianchuk et al. (2020)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "One of the advantages of edit-tag-based approaches is that they provide greater control over the system output. Building on Omelianchuk et al. 2020, we use confidence biases and minimum edit probability as additional inference hyper-parameters that we tune to push the model to perform more precise edits.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference Tweaks", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "Specifically, we add confidence biases to the probabilities of KEEP and DELETE edit-tags: responsible for not changing the source token and deleting the source token, respectively. We create these additional hyper-parameters for just these edit-tags because they are the most frequently used edit-tags for the Text Simplification task. Moreover, since they are token-independent, it provides the framework with additional robustness on the task without introducing too many additional hyperparameters. In this way, we were able to drive the model to keep/delete more tokens if the corresponding confidence bias was positive and to keep/delete fewer tokens if it was negative. We also add a sentence-level minimum edit probability threshold ( ) for the output of the edit detection layer. This hyper-parameter enabled the model to predict only the most confident edits. Thus, we were able to increase precision by trading off the recall and achieve better performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference Tweaks", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "These hyper-parameters were tuned using a combination of random search and Bayesian search (Nogueira, 2014) on the respective validation sets. Section 6 further describes the impact of the aforementioned tweaks on the system. Final values of these hyper-parameters are listed in Appendix A.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 107, |
|
"text": "(Nogueira, 2014)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference Tweaks", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "We report the results using two widely used metrics in Text Simplification literature: FKGL (Kincaid et al., 1975) , and SARI (Xu et al., 2016b) . Prior work has also used BLEU (Papineni et al., 2002) as a metric, but recent work has found that it is not a suitable metric for evaluating text simplification, because it was found to be negatively correlated with simplicity, essentially penalizing simpler sentences (Sulem et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 114, |
|
"text": "(Kincaid et al., 1975)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 126, |
|
"end": 144, |
|
"text": "(Xu et al., 2016b)", |
|
"ref_id": "BIBREF78" |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 200, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 416, |
|
"end": 436, |
|
"text": "(Sulem et al., 2018)", |
|
"ref_id": "BIBREF61" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.7" |
|
}, |
|
{ |
|
"text": "FKGL (Flesch-Kincaid Grade Level) is used to measure the readability of the generated sen-tence, where a lower score indicates simpler output. FKGL doesn't use source sentences or references for computing the score. It is a linear combination of the number of words per sentence (system output) and the number of syllables per word. On the other hand, SARI (System output Against References and against the Input sentence) evaluates the quality of the output by comparing the generated sentence to a set of reference sentences in terms of correctly inserted, kept and deleted ngrams (n \u2208 1, 2, 3, 4) . We report the overall SARI metric, and scores on the three rewrite operations used in SARI: the F1-scores of add (ADD), delete (DELETE) and keep (KEEP) operations. FKGL and SARI are both measured at corpus-level. We computed all the evaluation metrics using the EASSE 8 Python package (Alva-Manchego et al., 2019) . Table 2 summarizes the results of our evaluations on TurkCorpus, ASSET and WikiSmall test sets. To ensure robustness of results, we report average scores of 4 runs with different random seeds. We compare the results of our baseline model (TST-BASE) and our final model (TST-FINAL) against recent state-of-the-art Neural Text Simplification models. Additionally, we compare against a reference baseline similar to Martin et al. (2020b) , where we compute the scores in a leave-one-out scenario where each reference is evaluated against all the others and then scores are averaged over all references. TST-FINAL consists of all the enhancements mentioned in Section 4 added on top of TST-BASE: data pre-processing, GEC-initialization, data augmentation, and inference tweaks. In terms of the FKGL score, our system achieves better results than the reference baselines on TurkCorpus, and comes within 0.5 points on ASSET and Wik-iSmall. 
Compared to the state-of-the-art (Martin et al., 2020b) , it improves by 0.23 FKGL points on average, indicating that the simplifying edits made by TST are easier to understand.", |
|
"cite_spans": [ |
|
{ |
|
"start": 887, |
|
"end": 915, |
|
"text": "(Alva-Manchego et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1331, |
|
"end": 1352, |
|
"text": "Martin et al. (2020b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1885, |
|
"end": 1907, |
|
"text": "(Martin et al., 2020b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 586, |
|
"end": 599, |
|
"text": "\u2208 1, 2, 3, 4)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 918, |
|
"end": 925, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.7" |
|
}, |
|
{ |
|
"text": "In terms of SARI metrics, TST-BASE achieves a competitive score of 39.17 on TurkCorpus, and a state-of-the-art SARI score of 43.11 on WikiSmall, outperforming the previous state-of-the-art result by a huge margin of 6.19 SARI points. This shows that simply using our baseline architecture to train a Text Simplification model on WikiAll can achieve competitive performance on the task. On the other hand, the TST-FINAL makes significant improvements over TST-BASE. On TurkCorpus and AS-SET, it comes within 1 SARI point of the current state-of-the-art (Martin et al., 2020b) , outperforming all other prior text simplification models in literature. On WikiSmall, it further improves its state-of-the-art performance from TST-BASE to achieve a SARI score of 44.67. It can be seen that compared to prior works, the most significant improvements in both the TST models come from ADD and DELETE operations. It is noteworthy that TST-FINAL is able to achieve the highest F1 scores on ADD (6.96) and DELETE (47.87) SARI operations reported in literature on TurkCorpus. On the ASSET dataset, the F1 scores on ADD and DELETE operations improve further to 8.04 and 64.25 respectively, improving by large margins over TST-BASE. Similarly, it outperforms the state-of-the-art on ADD operations on WikiSmall. This shows that models proposed in prior works learned a safe, but inefficient strategy of simplification -leaning heavily on copying the source sentences directly, owing to their high KEEP scores. By contrast, our model learns to edit the sentences better, as shown by the lower rates of keeping the source sentences unchanged. This is further verified by the fact that outputs of prior works 9 have much longer output sentence lengths (avg. 19.26 words) compared to ours (avg. 16.7 words), leading to more effective simplifications.", |
|
"cite_spans": [ |
|
{ |
|
"start": 552, |
|
"end": 574, |
|
"text": "(Martin et al., 2020b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Simplification and Readability", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We also compare our system's inference times against the current state-of-the-art text simplification systems. Specifically, we compare against ACCESS (trained on WikiLarge data) (Martin et al., 2020a) and BART+ACCESS (Martin et al., 2020b ) (trained on WikiLarge + MINED data) systems. We used the publicly available model checkpoint for ACCESS to compare against Martin et al. (2020a) . Direct comparison against BART+ACCESS was not possible because of the lack of publicly available code. Therefore, we used BART (Lewis et al., 2020) for text summarization as a proxy for Martin et al. (2020b) . We ran all systems with batch size 128 on the TurkCorpus test set 100 times, using NVIDIA Tesla V100. Within a single run, the results were averaged across all batches. We took into account only the actual inference time and omitted any initialization times.", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 201, |
|
"text": "(Martin et al., 2020a)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 239, |
|
"text": "(Martin et al., 2020b", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 386, |
|
"text": "Martin et al. (2020a)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 536, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 575, |
|
"end": 596, |
|
"text": "Martin et al. (2020b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference Time", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The results in Table 3 show that the inference speeds 10 of TST are at least 6 times faster than ACCESS and 11.75 times faster than pure BART which is the crucial component of the current stateof-the-art (Martin et al., 2020b) . The impact of a non-autoregressive model architecture can be clearly seen here since TST is a sequence tagging system and does not need to predict edits one-by-one as done by auto-regressive transformer decoders (like the one used in ACCESS). There- 9 Measured on TurkCorpus. This information was not available for ASSET and WikiSmall 10 We compared ACCESS and BART with beam size 8 and TST with 2 iterations, as reported on TurkCorpus", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 226, |
|
"text": "(Martin et al., 2020b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 480, |
|
"text": "9", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 564, |
|
"end": 566, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 22, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Inference Time", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Inference time (sec) BART, beam size = 8 2.82 BART, beam size = 2 1.95 ACCESS, beam size = 8 1.43 ACCESS, beam size = 1 1.14 TST, 5 iterations 0.43 TST, 4 iterations 0.39 TST, 3 iterations 0.33 TST, 2 iterations 0.24 TST, 1 iteration 0.13 Table 3 : Average inference time per batch. In the context of TST, iterations refers to the number of iterations mentioned in Section 3.2 fore, the inference is naturally parallelizable and therefore runs many times faster.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 246, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, we present ablation experiments for each of the enhancements described in Section 4, and applied to TST-BASE to obtain TST-FINAL. The results of these experiments (Table 4) are reported on SARI and FKGL scores, averaged between ASSET and TurkCorpus test datasets. Each result is reported using an average of 4 runs for each experiment. Overall, the enhancements improve the SARI score by 4.0 points and FKGL by 0.21 points, while reducing variance in both cases.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 189, |
|
"text": "(Table 4)", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We improve our strong baseline model TST-BASE by pre-training it on the GEC task. 11 Even though we find that using TST-GEC leads to modest immediate improvements (+0.1 SARI point) compared to TST-BASE, we found that adding other enhancements without the GEC-pre-training were not as effective, with the final model (TST-BASE + Filtering + WikiEns + InfTweaks) achieving an average SARI score of 40.01 -significantly lower than the one with GEC-pre-training (42.3). These results show that pre-training TST-BASE on GEC is an effective way to initialize the model for Text Simplification, since it equips the model to make additions and deletions, which are then further improved during training on the text simplification data. This was also not unexpected because the edit-tags were obtained from huge amounts of GEC data, and are expected to have good coverage with regards to many standard monolingual text editing problems -as also observed by a high overlap in the tag distributions between the GEC and Text Simplification domains (Section 3.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GEC Initialization", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "As mentioned in Section 4.2, we removed special tokens found in Wikipedia data such as -LRB-and -RRB-, along with the text enclosed by these tokens, in both source and target sentences. We find that using GEC initialization together with filtering brackets was beneficial to the system (+0.8 SARI points), and also decreased the variance in the results. The benefit of this step towards improving text simplification quality is also seconded by a significantly reduced FKGL score (-0.66 points).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Pre-processing", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We explored two strategies of data augmentation: enriching training data with (i) back-translated data (WikiBT), (ii) ensemble-generated data (WikiEns). Augmenting the training data using WikiEns leads to a bigger boost compared to just adding WikiBT (+1.2 vs +0.4). We also experimented with adding both synthetic datasets (WikiEns + WikiBT) to the WikiAll training data, but the performance was worse compared to using only WikiEns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Augmentation", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Finally, we describe the effect of tuning the inference hyper-parameters for our model obtained so far. Using these tweaks (Section 4.6) is one of the most crucial components of our system. Overall, it is not just able to affect the sequence generation, but also gives us the biggest boost (+2.0 points). Our final model with inference tweaks comfortably outperforms its predecessor on all datasets, demonstrating their effectiveness on the task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference Tweaks", |
|
"sec_num": "6.4" |
|
}, |
|
{ |
|
"text": "This paper introduces TST, a novel approach to text simplification, by reformulating the task into a much simpler one of sequence tagging. We build TST by adapting the GECToR framework for GEC. We show that most of its performance gains are owed to simplistic data augmentations and tweaks in training and inference. These modifications allow us to derive maximal benefit from the already existing pre-trained Transformer-based encoders on large amounts of human-annotated and synthetic data, making TST a simple, powerful, easily reusable method for monolingual editing tasks. Since TST is able to progressively make simplifying edits via explicit edit-tag operations, the transformations resulting from TST are better explainable and interpretable than any other NMTbased Seq2Seq approaches. Finally, TST is fully non-autoregressive, enabling it to perform faster inference than any other state-of-the-art text simplification methods. Our empirical results demonstrate that it achieves near state-of-the-art performance on benchmark test datasets for text simplification. A major motivation in this work was to minimize changes to the original model to keep the system simple, fast, and reproducible. Hence, we restricted our system to only use WikiAll data and its derivatives (vs. any external data like the state-of-the-art system by Martin et al. (2020b) ). While we did not fully beat the state-of-the-art on the task, we believe that using larger models (eg. RoBERTa LARGE ), ensembles, or external data will likely lead to better SARI scores at the cost of speed and system complexity: ideas we plan to explore in future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1339, |
|
"end": 1360, |
|
"text": "Martin et al. (2020b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "he also completed two collections of short stories entitled the ribbajack & other curious yarns and seven strange and ghostly tales . Reference he also wrote two books of short stories called , the ribbajack & other curious yarns and seven strange and ghostly tales . TST-BASE he also wrote two collections of short stories called the ribbajack & other curious yarns and seven strange and ghostly tales . TST-FINAL he also wrote a series of short stories called the ribbajack & other curious yarns and seven strange and ghostly tales .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Original", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "it is theoretically possible that the other editors who may have reported you , and the administrator who blocked you , are part of a conspiracy against someone half a world away they 've never met in person . Reference it is theoretically possible that the other editors who may have written about you, and the officer who blocked you, are part of a bad plan against someone miles away, they 've never met face to face . TST-BASE it is possible that the other editors who may have reported you , and the administrator who blocked you , are part of a conspiracy against someone half a world away they ' ve never met in person . TST-FINAL it might be that the other editors who may have sent you , and the administrator who blocked you , are part of a conspiracy against someone half a world away where they 've never met in person .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Original", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Original as a result , although many mosques will not enforce violations , both men and women when attending a mosque must adhere to these guidelines . Reference as a result , both men and women must follow this rule when they attend a mosque , even though many mosques do not enforce these rules TST-BASE both men and women when going a mosque must follow these rules . TST-FINAL both men , and women that attend mosque , must follow the law .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Original", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "hinterrhein is an administrative district in the canton of graub\u00fcnden , switzerland . Reference hinterrhein is a district of the canton of graub\u00fcnden , switzerland .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Original", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "hinterrhein is a district of the canton of graub\u00fcnden , switzerland . TST-FINAL hinterrhein is a part of the canton of graub\u00fcnden in the switzerland .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TST-BASE", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "a majority of south indians speak one of the five dravidian languages -kannada , malayalam , tamil , telugu and tulu . Reference many of the south indians are dravidians and they speak one of four dravidian languages -kannada , malayalam , tamil or telugu . TST-BASE most of south indians speak one of the five dravidian languages -kannada , malayalam , tamil , telugu and tulu . TST-FINAL most of the people speak speakers from the three dravidian languages spoken are , kannada , malayalam , tamil , telugu , and tulu . ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Original", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Available at https://github.com/grammarly/ gector#text-simplification", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Another widely-used dataset for the task, the Newsela Corpus(Xu et al., 2015), could not be used due to its extremely rigid legal and licensing requirements.3 https://github.com/huggingface/ tokenizers", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We refer the reader toOmelianchuk et al. (2020) for details on training the model for GEC.5 http://opus.nlpl.eu/ 6 https://marian-nmt.github.io/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://huggingface.co/Helsinki-NLP/ opus-mt-<L1>-<L2>", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/feralvam/easse", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We refer the reader toOmelianchuk et al. (2020) for details on training the model for GEC.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": " Table 5 describes the list of hyper-parameters used for TST-FINAL model. In Table 6 , we list the inference tweaks hyper-parameters found by Bayesian Search on TurkCorpus and ASSET datasets. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1, |
|
"end": 8, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 77, |
|
"end": 84, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Model Configurations", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Various examples from our system are shown in Table 7 . Examining the simplifications, we see reduced sentence length, sentence splitting of a complex sentence into multiple shorter sentences, and the use of simpler vocabulary. Manual comparison between TST-BASE and TST-FINAL shows that the first system tends to delete some complex words from the text. For example, \"theoretically possible\" gets shortened to just \"possible,\" and \"administrative district\" to \"district\". TST-FINAL model tends to be more creative and changes phrases to simpler versions like \"is theoretically possible\" to \"might be\" or \"an administrative district\" to \"a part of.\" However, this aggressive and creative strategy sometimes also generates ungrammatical output like in the last example in Table 7 . While it rarely happens, the model might also change the meaning of the original sentence. For example, replacing \"the five\" to \"the three.\" It is worth noticing that the same problem of meaning change is present in the reference sentences as well: where \"the five\" got replaced with \"one of four\". ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 53, |
|
"text": "Table 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 771, |
|
"end": 778, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Simplification Examples", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A corpus analysis of simple account texts and the proposal of simplification strategies: first steps towards text simplification systems", |
|
"authors": [ |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"A S" |
|
], |
|
"last": "Pardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Maziero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helena", |
|
"middle": [], |
|
"last": "De Medeiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"P M" |
|
], |
|
"last": "Caseli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fortes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "SIGDOC '08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sandra M. Alu\u00edsio, Lucia Specia, T. A. S. Pardo, E. Maziero, Helena de Medeiros Caseli, and R. P. M. Fortes. 2008. A corpus analysis of simple account texts and the proposal of simplification strategies: first steps towards text simplification systems. In SIGDOC '08.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "A corpus analysis of simple account texts and the proposal of simplification strategies: First steps towards text simplification systems", |
|
"authors": [ |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Thiago", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erick", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Pardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helena", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Maziero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renata", |
|
"middle": [ |
|
"P M" |
|
], |
|
"last": "Caseli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fortes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 26th Annual ACM International Conference on Design of Communication, SIGDOC '08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "15--22", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1456536.1456540" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sandra M. Alu\u00edsio, Lucia Specia, Thiago A. S. Pardo, Erick G. Maziero, Helena M. Caseli, and Renata P. M. Fortes. 2008. A corpus analysis of sim- ple account texts and the proposal of simplification strategies: First steps towards text simplification sys- tems. In Proceedings of the 26th Annual ACM Inter- national Conference on Design of Communication, SIGDOC '08, page 15-22, New York, NY, USA. As- sociation for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning how to simplify from explicit labeling of complex-simplified text pairs", |
|
"authors": [ |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Alva-Manchego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joachim", |
|
"middle": [], |
|
"last": "Bingel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gustavo", |
|
"middle": [], |
|
"last": "Paetzold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "295--305", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fernando Alva-Manchego, Joachim Bingel, Gustavo Paetzold, Carolina Scarton, and Lucia Specia. 2017. Learning how to simplify from explicit labeling of complex-simplified text pairs. In Proceedings of the Eighth International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 295-305, Taipei, Taiwan. Asian Federation of Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "ASSET: A dataset for tuning and evaluation of sentence simplification models with multiple rewriting transformations", |
|
"authors": [ |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Alva-Manchego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Sagot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4668--4679", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.424" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fernando Alva-Manchego, Louis Martin, Antoine Bor- des, Carolina Scarton, Beno\u00eet Sagot, and Lucia Spe- cia. 2020. ASSET: A dataset for tuning and evalu- ation of sentence simplification models with multi- ple rewriting transformations. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 4668-4679, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Easse: Easier automatic sentence simplification evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Alva-Manchego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.04567" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fernando Alva-Manchego, Louis Martin, Carolina Scarton, and Lucia Specia. 2019. Easse: Easier automatic sentence simplification evaluation. arXiv preprint arXiv:1908.04567.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Parallel iterative edit models for local sequence transduction", |
|
"authors": [ |
|
{ |
|
"first": "Abhijeet", |
|
"middle": [], |
|
"last": "Awasthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunita", |
|
"middle": [], |
|
"last": "Sarawagi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rasna", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sabyasachi", |
|
"middle": [], |
|
"last": "Ghosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vihari", |
|
"middle": [], |
|
"last": "Piratla", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4260--4270", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1435" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhijeet Awasthi, Sunita Sarawagi, Rasna Goyal, Sabyasachi Ghosh, and Vihari Piratla. 2019. Parallel iterative edit models for local sequence transduction. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 4260- 4270, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Text simplification for informationseeking applications", |
|
"authors": [ |
|
{ |
|
"first": "Beata", |
|
"middle": [ |
|
"Beigman" |
|
], |
|
"last": "Klebanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "On the Move to Meaningful Internet Systems 2004: CoopIS, DOA, and ODBASE", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "735--747", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Beata Beigman Klebanov, Kevin Knight, and Daniel Marcu. 2004. Text simplification for information- seeking applications. In On the Move to Meaningful Internet Systems 2004: CoopIS, DOA, and ODBASE, pages 735-747, Berlin, Heidelberg. Springer Berlin Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Text simplification for children", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Belder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Francine", |
|
"middle": [], |
|
"last": "Moens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "SIGIR 2010", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. D. Belder and Marie-Francine Moens. 2010. Text simplification for children. In SIGIR 2010.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Practical simplification of english newspaper text to assist aphasic readers", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Carroll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guido", |
|
"middle": [], |
|
"last": "Minnen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yvonne", |
|
"middle": [], |
|
"last": "Canning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siobhan", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Tait", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the AAAI-98 Workshop on Integrating Artificial Intelligence and Assistive Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Carroll, Guido Minnen, Yvonne Canning, Siob- han Devlin, and John Tait. 1998. Practical simpli- fication of english newspaper text to assist aphasic readers. In Proceedings of the AAAI-98 Workshop on Integrating Artificial Intelligence and Assistive Technology, pages 7-10.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Simplifying text for language-impaired readers", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Carroll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guido", |
|
"middle": [], |
|
"last": "Minnen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Darren", |
|
"middle": [], |
|
"last": "Pearce", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yvonne", |
|
"middle": [], |
|
"last": "Canning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siobhan", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Tait", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Ninth Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Carroll, Guido Minnen, Darren Pearce, Yvonne Canning, Siobhan Devlin, and John Tait. 1999. Sim- plifying text for language-impaired readers. In Ninth Conference of the European Chapter of the Association for Computational Linguistics, Bergen, Norway. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Motivations and methods for text simplification", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Chandrasekar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Doran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Srinivas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "The 16th International Conference on Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Chandrasekar, Christine Doran, and B. Srinivas. 1996. Motivations and methods for text simplifica- tion. In COLING 1996 Volume 2: The 16th Interna- tional Conference on Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merri\u00ebnboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fethi", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1724--1734", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1179" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart van Merri\u00ebnboer, Caglar Gul- cehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Nat- ural Language Processing (EMNLP), pages 1724- 1734, Doha, Qatar. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Learning to simplify sentences using Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Coster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Kauchak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Workshop on Monolingual Text-To-Text Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Will Coster and David Kauchak. 2011. Learning to simplify sentences using Wikipedia. In Proceedings of the Workshop on Monolingual Text-To-Text Gener- ation, pages 1-9, Portland, Oregon. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Helping aphasic people process online information", |
|
"authors": [ |
|
{ |
|
"first": "Siobhan", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gary", |
|
"middle": [], |
|
"last": "Unthank", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 8th International ACM SIGACCESS Conference on Computers and Accessibility, Assets '06", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "225--226", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1168987.1169027" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siobhan Devlin and Gary Unthank. 2006. Helping aphasic people process online information. In Pro- ceedings of the 8th International ACM SIGACCESS Conference on Computers and Accessibility, Assets '06, page 225-226, New York, NY, USA. Associa- tion for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "EditNTS: An neural programmer-interpreter model for sentence simplification through explicit editing", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mehdi", |
|
"middle": [], |
|
"last": "Rezagholizadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jackie Chi Kit", |
|
"middle": [], |
|
"last": "Cheung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3393--3402", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1331" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Dong, Zichao Li, Mehdi Rezagholizadeh, and Jackie Chi Kit Cheung. 2019. EditNTS: An neural programmer-interpreter model for sentence simplifi- cation through explicit editing. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 3393-3402, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "An evaluation of syntactic simplification rules for people with autism", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Evans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Constantin", |
|
"middle": [], |
|
"last": "Or\u0103san", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iustin", |
|
"middle": [], |
|
"last": "Dornescu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 3rd Workshop on Predicting and Improving Text Readability for Target Reader Populations (PITR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "131--140", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/W14-1215" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Evans, Constantin Or\u0103san, and Iustin Dor- nescu. 2014. An evaluation of syntactic simplifica- tion rules for people with autism. In Proceedings of the 3rd Workshop on Predicting and Improving Text Readability for Target Reader Populations (PITR), pages 131-140, Gothenburg, Sweden. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Ensemble distillation for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Freitag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Al-Onaizan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Sankaran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markus Freitag, Yaser Al-Onaizan, and B. Sankaran. 2017. Ensemble distillation for neural machine translation. ArXiv, abs/1702.01802.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Natural language processing for social inclusion : a text simplification architecture for different literacy levels", |
|
"authors": [ |
|
{ |
|
"first": "Caroline", |
|
"middle": [], |
|
"last": "Gasperin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erick", |
|
"middle": [ |
|
"Galani" |
|
], |
|
"last": "Maziero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thiago", |
|
"middle": [ |
|
"A", |
|
"S" |
|
], |
|
"last": "Pardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Caroline Gasperin, Erick Galani Maziero, Lucia Spe- cia, Thiago A. S. Pardo, and Sandra M. Alu\u00edsio. 2009. Natural language processing for social inclu- sion : a text simplification architecture for different literacy levels.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Improving translation quality by manipulating sentence length", |
|
"authors": [ |
|
{ |
|
"first": "Laurie", |
|
"middle": [], |
|
"last": "Gerber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Machine Translation and the Information Soup", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "448--460", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laurie Gerber and Eduard Hovy. 1998. Improving translation quality by manipulating sentence length. In Machine Translation and the Information Soup, pages 448-460, Berlin, Heidelberg. Springer Berlin Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Simplifying lexical simplification: Do we need simplified corpora?", |
|
"authors": [ |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "\u0160tajner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "63--68", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P15-2011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Goran Glava\u0161 and Sanja \u0160tajner. 2015. Simplifying lexical simplification: Do we need simplified cor- pora? In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Volume 2: Short Papers), pages 63-68, Beijing, China. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Levenshtein transformer", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changhan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "11181--11191", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Changhan Wang, and Junbo Zhao. 2019. Levenshtein transformer. In Advances in Neural Information Processing Systems 32, pages 11181- 11191. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Dynamic multi-level multi-task learning for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Han", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramakanth", |
|
"middle": [], |
|
"last": "Pasunuru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "462--476", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Han Guo, Ramakanth Pasunuru, and Mohit Bansal. 2018. Dynamic multi-level multi-task learning for sentence simplification. In Proceedings of the 27th International Conference on Computational Linguis- tics, pages 462-476, Santa Fe, New Mexico, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Source sentence simplification for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Hasler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adri\u00e0", |
|
"middle": [], |
|
"last": "De Gispert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Stahlberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aurelien", |
|
"middle": [], |
|
"last": "Waite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Byrne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computer Speech & Language", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "221--235", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.csl.2016.12.001" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eva Hasler, Adri\u00e0 de Gispert, Felix Stahlberg, Aurelien Waite, and Bill Byrne. 2017. Source sentence sim- plification for statistical machine translation. Com- puter Speech & Language, 45:221 -235.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Learning a lexical simplifier using Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Colby", |
|
"middle": [], |
|
"last": "Horn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cathryn", |
|
"middle": [], |
|
"last": "Manduca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Kauchak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "458--463", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P14-2075" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colby Horn, Cathryn Manduca, and David Kauchak. 2014. Learning a lexical simplifier using Wikipedia. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 458-463, Baltimore, Mary- land. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Verb paraphrase based on case frame alignment", |
|
"authors": [ |
|
{ |
|
"first": "Nobuhiro", |
|
"middle": [], |
|
"last": "Kaji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Sato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "215--222", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073120" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nobuhiro Kaji, Daisuke Kawahara, Sadao Kurohashi, and Satoshi Sato. 2002. Verb paraphrase based on case frame alignment. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, pages 215-222, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Selecting proper lexical paraphrase for children", |
|
"authors": [ |
|
{ |
|
"first": "Tomoyuki", |
|
"middle": [], |
|
"last": "Kajiwara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuhide", |
|
"middle": [], |
|
"last": "Yamamoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 25th Conference on Computational Linguistics and Speech Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "59--73", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomoyuki Kajiwara, Hiroshi Matsumoto, and Kazuhide Yamamoto. 2013. Selecting proper lexical paraphrase for children. In Proceedings of the 25th Conference on Computational Linguistics and Speech Processing (ROCLING 2013), pages 59-73, Kaohsiung, Taiwan. The Association for Computational Linguistics and Chinese Language Processing (ACLCLP).", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Improving text simplification language modeling using unsimplified text data", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Kauchak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1537--1546", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Kauchak. 2013. Improving text simplification language modeling using unsimplified text data. In Proceedings of the 51st Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 1537-1546, Sofia, Bulgaria. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Derivation of new readability formulas (automated readability index, fog count and flesch reading ease formula) for navy enlisted personnel", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"Peter" |
|
], |
|
"last": "Kincaid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Fishburne", |
|
"suffix": "Jr" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Rogers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brad", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Chissom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1975, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J Peter Kincaid, Robert P Fishburne Jr, Richard L Rogers, and Brad S Chissom. 1975. Derivation of new readability formulas (automated readability in- dex, fog count and flesch reading ease formula) for navy enlisted personnel.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Complexity-weighted loss and diverse reranking for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Reno", |
|
"middle": [], |
|
"last": "Kriz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo\u00e3o", |
|
"middle": [], |
|
"last": "Sedoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marianna", |
|
"middle": [], |
|
"last": "Apidianaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Miltsakaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3137--3147", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1317" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reno Kriz, Jo\u00e3o Sedoc, Marianna Apidianaki, Car- olina Zheng, Gaurav Kumar, Eleni Miltsakaki, and Chris Callison-Burch. 2019. Complexity-weighted loss and diverse reranking for sentence simplifica- tion. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 3137-3147, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelrahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7871--7880", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.703" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre- training for natural language generation, translation, and comprehension. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Text simplification with self-attention-based pointer-generator networks", |
|
"authors": [ |
|
{ |
|
"first": "Tianyu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jipeng", |
|
"middle": [], |
|
"last": "Qiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Hao", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Neural Information Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "537--545", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyu Li, Yun Li, Jipeng Qiang, and Yun-Hao Yuan. 2018. Text simplification with self-attention-based pointer-generator networks. In Neural Information Processing, pages 537-545, Cham. Springer Inter- national Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Roberta: A robustly optimized BERT pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized BERT pretraining ap- proach. CoRR, abs/1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Felix: Flexible text editing through tagging and insertion", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Mallinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aliaksei", |
|
"middle": [], |
|
"last": "Severyn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Malmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillermo", |
|
"middle": [], |
|
"last": "Garrido", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.10687" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Mallinson, Aliaksei Severyn, Eric Malmi, and Guillermo Garrido. 2020. Felix: Flexible text edit- ing through tagging and insertion. arXiv preprint arXiv:2003.10687.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Encode, tag, realize: High-precision text editing", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Malmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Krause", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sascha", |
|
"middle": [], |
|
"last": "Rothe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniil", |
|
"middle": [], |
|
"last": "Mirylenka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aliaksei", |
|
"middle": [], |
|
"last": "Severyn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5054--5065", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1510" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Malmi, Sebastian Krause, Sascha Rothe, Daniil Mirylenka, and Aliaksei Severyn. 2019. Encode, tag, realize: High-precision text editing. In Proceed- ings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Inter- national Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 5054-5065, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Controllable sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Louis", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c9ric", |
|
"middle": [], |
|
"last": "De La Clergerie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Sagot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4689--4698", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Louis Martin, \u00c9ric de la Clergerie, Beno\u00eet Sagot, and Antoine Bordes. 2020a. Controllable sentence sim- plification. In Proceedings of The 12th Language Resources and Evaluation Conference, pages 4689- 4698, Marseille, France. European Language Re- sources Association.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "\u00c9ric de la Clergerie, Antoine Bordes, and Beno\u00eet Sagot. 2020b. Multilingual unsupervised sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Louis", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.00352" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Louis Martin, Angela Fan, \u00c9ric de la Clergerie, An- toine Bordes, and Beno\u00eet Sagot. 2020b. Multilin- gual unsupervised sentence simplification. arXiv preprint arXiv:2005.00352.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Writing for language-impaired readers", |
|
"authors": [ |
|
{ |
|
"first": "Aur\u00e9lien", |
|
"middle": [], |
|
"last": "Max", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Computational Linguistics and Intelligent Text Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "567--570", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aur\u00e9lien Max. 2006. Writing for language-impaired readers. In Computational Linguistics and Intelli- gent Text Processing, pages 567-570, Berlin, Hei- delberg. Springer Berlin Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Hybrid simplification using deep semantics and machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "435--445", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P14-1041" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Narayan and Claire Gardent. 2014. Hybrid sim- plification using deep semantics and machine trans- lation. In Proceedings of the 52nd Annual Meet- ing of the Association for Computational Linguis- tics (Volume 1: Long Papers), pages 435-445, Balti- more, Maryland. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Exploring neural text simplification models", |
|
"authors": [ |
|
{ |
|
"first": "Sergiu", |
|
"middle": [], |
|
"last": "Nisioi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Sanja\u0161tajner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liviu", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dinu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "85--91", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-2014" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sergiu Nisioi, Sanja \u0160tajner, Simone Paolo Ponzetto, and Liviu P. Dinu. 2017. Exploring neural text sim- plification models. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 85-91, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Bayesian Optimization: Open source constrained global optimization tool for Python", |
|
"authors": [ |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fernando Nogueira. 2014. Bayesian Optimization: Open source constrained global optimization tool for Python.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "GECToR -grammatical error correction: Tag, not rewrite", |
|
"authors": [ |
|
{ |
|
"first": "Kostiantyn", |
|
"middle": [], |
|
"last": "Omelianchuk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vitaliy", |
|
"middle": [], |
|
"last": "Atrasevych", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Artem", |
|
"middle": [], |
|
"last": "Chernodub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oleksandr", |
|
"middle": [], |
|
"last": "Skurzhanskyi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fifteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "163--170", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.bea-1.16" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kostiantyn Omelianchuk, Vitaliy Atrasevych, Artem Chernodub, and Oleksandr Skurzhanskyi. 2020. GECToR -grammatical error correction: Tag, not rewrite. In Proceedings of the Fifteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 163-170, Seattle, WA, USA \u2192 Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Reliable lexical simplification for non-native speakers", |
|
"authors": [ |
|
{ |
|
"first": "Gustavo", |
|
"middle": [], |
|
"last": "Paetzold", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/N15-2002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gustavo Paetzold. 2015. Reliable lexical simplification for non-native speakers. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop, pages 9-16, Denver, Colorado. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Understanding the lexical simplification needs of nonnative speakers of English", |
|
"authors": [ |
|
{ |
|
"first": "Gustavo", |
|
"middle": [], |
|
"last": "Paetzold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COL-ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "717--727", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gustavo Paetzold and Lucia Specia. 2016a. Under- standing the lexical simplification needs of non- native speakers of English. In Proceedings of COL- ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 717-727, Osaka, Japan. The COLING 2016 Orga- nizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Unsupervised lexical simplification for non-native speakers", |
|
"authors": [ |
|
{ |
|
"first": "Gustavo", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Paetzold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence, AAAI'16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3761--3767", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gustavo H. Paetzold and Lucia Specia. 2016b. Unsu- pervised lexical simplification for non-native speak- ers. In Proceedings of the Thirtieth AAAI Con- ference on Artificial Intelligence, AAAI'16, page 3761-3767. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "An open corpus of everyday documents for simplification tasks", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Pellow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxine", |
|
"middle": [], |
|
"last": "Eskenazi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 3rd Workshop on Predicting and Improving Text Readability for Target Reader Populations (PITR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "84--93", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/W14-1210" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Pellow and Maxine Eskenazi. 2014. An open corpus of everyday documents for simplification tasks. In Proceedings of the 3rd Workshop on Pre- dicting and Improving Text Readability for Target Reader Populations (PITR), pages 84-93, Gothen- burg, Sweden. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Text simplification for language learners: A corpus analysis", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Petersen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of Workshop on Speech and Language Technology for Education", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah E. Petersen and Mari Ostendorf. 2007. Text sim- plification for language learners: A corpus analysis. In In Proceedings of Workshop on Speech and Lan- guage Technology for Education.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Improving neural text simplification model with simplified corpora", |
|
"authors": [ |
|
{ |
|
"first": "Jipeng", |
|
"middle": [], |
|
"last": "Qiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04428" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jipeng Qiang. 2018. Improving neural text simplifica- tion model with simplified corpora. arXiv preprint arXiv:1810.04428.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Simplify or help? text simplification strategies for people with dyslexia", |
|
"authors": [ |
|
{ |
|
"first": "Luz", |
|
"middle": [], |
|
"last": "Rello", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Baeza-Yates", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Bott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Horacio", |
|
"middle": [], |
|
"last": "Saggion", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 10th International Cross-Disciplinary Conference on Web Accessibility, W4A '13", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2461121.2461126" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luz Rello, Ricardo Baeza-Yates, Stefan Bott, and Ho- racio Saggion. 2013a. Simplify or help? text simplification strategies for people with dyslexia. In Proceedings of the 10th International Cross- Disciplinary Conference on Web Accessibility, W4A '13, New York, NY, USA. Association for Comput- ing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "The impact of lexical simplification by verbal paraphrases for people with and without dyslexia", |
|
"authors": [ |
|
{ |
|
"first": "Luz", |
|
"middle": [], |
|
"last": "Rello", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Baeza-Yates", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Horacio", |
|
"middle": [], |
|
"last": "Saggion", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Linguistics and Intelligent Text Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "501--512", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luz Rello, Ricardo Baeza-Yates, and Horacio Sag- gion. 2013b. The impact of lexical simplification by verbal paraphrases for people with and without dyslexia. In Computational Linguistics and Intelli- gent Text Processing, pages 501-512, Berlin, Hei- delberg. Springer Berlin Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Local string transduction as sequence labeling", |
|
"authors": [ |
|
{ |
|
"first": "Joana", |
|
"middle": [], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shay", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1360--1371", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joana Ribeiro, Shashi Narayan, Shay B. Cohen, and Xavier Carreras. 2018. Local string transduction as sequence labeling. In Proceedings of the 27th Inter- national Conference on Computational Linguistics, pages 1360-1371, Santa Fe, New Mexico, USA. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Syntactic Simplification and Text Cohesion", |
|
"authors": [ |
|
{ |
|
"first": "Advaith", |
|
"middle": [], |
|
"last": "Siddharthan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Research on Language and Computation", |
|
"volume": "4", |
|
"issue": "1", |
|
"pages": "77--109", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s11168-006-9011-1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Advaith Siddharthan. 2006. Syntactic Simplification and Text Cohesion. Research on Language and Computation, 4(1):77-109.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "A survey of research on text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Advaith", |
|
"middle": [], |
|
"last": "Siddharthan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Journal of Applied Linguistics", |
|
"volume": "165", |
|
"issue": "2", |
|
"pages": "259--298", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Advaith Siddharthan. 2014. A survey of research on text simplification. International Journal of Applied Linguistics, 165(2):259-298.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Enhancing multi-document summaries with sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Botelho Silveira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ant\u00f3nio", |
|
"middle": [], |
|
"last": "Branco", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "ICAI 2012: International Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sara Botelho Silveira and Ant\u00f3nio Branco. 2012. En- hancing multi-document summaries with sentence simplification. In In ICAI 2012: International Con- ference on Artificial Intelligence, Las Vegas.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Translating from complex to simplified sentences", |
|
"authors": [ |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 9th International Conference on Computational Processing of the Portuguese Language, PROPOR'10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--39", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-642-12320-7_5" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucia Specia. 2010. Translating from complex to sim- plified sentences. In Proceedings of the 9th In- ternational Conference on Computational Process- ing of the Portuguese Language, PROPOR'10, page 30-39, Berlin, Heidelberg. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "A deeper exploration of the standard PB-SMT approach to text simplification and its evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Hannah", |
|
"middle": [], |
|
"last": "Sanja\u0161tajner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Horacio", |
|
"middle": [], |
|
"last": "B\u00e9chara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Saggion", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "823--828", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P15-2135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sanja \u0160tajner, Hannah B\u00e9chara, and Horacio Saggion. 2015. A deeper exploration of the standard PB-SMT approach to text simplification and its evaluation. In Proceedings of the 53rd Annual Meeting of the Asso- ciation for Computational Linguistics and the 7th In- ternational Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 823- 828, Beijing, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "A detailed evaluation of neural sequence-to-sequence models for in-domain and cross-domain text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Sergiu", |
|
"middle": [], |
|
"last": "Sanja\u0161tajner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nisioi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC-2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sanja \u0160tajner and Sergiu Nisioi. 2018. A detailed evaluation of neural sequence-to-sequence models for in-domain and cross-domain text simplifica- tion. In Proceedings of the Eleventh International Conference on Language Resources and Evalua- tion (LREC-2018), Miyazaki, Japan. European Lan- guages Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Can text simplification help machine translation?", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Sanja\u0161tajner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Popovic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 19th Annual Conference of the European Association for Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "230--242", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sanja \u0160tajner and Maja Popovic. 2016. Can text simpli- fication help machine translation? In Proceedings of the 19th Annual Conference of the European Associ- ation for Machine Translation, pages 230-242.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "BLEU is not suitable for the evaluation of text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Elior", |
|
"middle": [], |
|
"last": "Sulem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omri", |
|
"middle": [], |
|
"last": "Abend", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Rappoport", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "738--744", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1081" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elior Sulem, Omri Abend, and Ari Rappoport. 2018. BLEU is not suitable for the evaluation of text sim- plification. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 738-744, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "Unsupervised neural text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Sai", |
|
"middle": [], |
|
"last": "Surya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhijit", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anirban", |
|
"middle": [], |
|
"last": "Laha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parag", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Sankaranarayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2058--2068", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1198" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sai Surya, Abhijit Mishra, Anirban Laha, Parag Jain, and Karthik Sankaranarayanan. 2019. Unsupervised neural text simplification. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 2058-2068, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. In Z. Ghahramani, M. Welling, C. Cortes, N. D.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "Advances in Neural Information Processing Systems", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Lawrence", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "3104--3112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lawrence, and K. Q. Weinberger, editors, Advances in Neural Information Processing Systems 27, pages 3104-3112. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "OPUS-MT -Building open translation services for the World", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Santhosh", |
|
"middle": [], |
|
"last": "Thottingal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 22nd Annual Conferenec of the European Association for Machine Translation (EAMT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Tiedemann and Santhosh Thottingal. 2020. OPUS-MT -Building open translation services for the World. In Proceedings of the 22nd Annual Con- ferenec of the European Association for Machine Translation (EAMT), Lisbon, Portugal.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "Sentence simplification for semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Vickrey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daphne", |
|
"middle": [], |
|
"last": "Koller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL-08: HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "344--352", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Vickrey and Daphne Koller. 2008. Sentence sim- plification for semantic role labeling. In Proceed- ings of ACL-08: HLT, pages 344-352, Columbus, Ohio. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "Sentence simplification with memoryaugmented neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Tu", |
|
"middle": [], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baotian", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsendsuren", |
|
"middle": [], |
|
"last": "Munkhdalai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "79--85", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2013" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tu Vu, Baotian Hu, Tsendsuren Munkhdalai, and Hong Yu. 2018. Sentence simplification with memory- augmented neural networks. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 2 (Short Pa- pers), pages 79-85, New Orleans, Louisiana. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "Text simplification using neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ping", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Rochford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jipeng", |
|
"middle": [], |
|
"last": "Qiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tong Wang, Ping Chen, John Rochford, and Jipeng Qiang. 2016. Text simplification using neural ma- chine translation.", |
|
"links": null |
|
}, |
|
"BIBREF69": { |
|
"ref_id": "b69", |
|
"title": "Renata Pontin de Mattos Fortes, Thiago Alexandre Salgueiro Pardo, and Sandra Maria Alu\u00edsio", |
|
"authors": [ |
|
{ |
|
"first": "Arnaldo", |
|
"middle": [], |
|
"last": "Willian Massami Watanabe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vin\u00edcius", |
|
"middle": [], |
|
"last": "Candido Junior", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rodriguez Uz\u00eada", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 27th ACM International Conference on Design of Communication, SIGDOC '09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "29--36", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1621995.1622002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Willian Massami Watanabe, Arnaldo Candido Junior, Vin\u00edcius Rodriguez Uz\u00eada, Renata Pontin de Mat- tos Fortes, Thiago Alexandre Salgueiro Pardo, and Sandra Maria Alu\u00edsio. 2009. Facilita: Reading as- sistance for low-literacy readers. In Proceedings of the 27th ACM International Conference on Design of Communication, SIGDOC '09, page 29-36, New York, NY, USA. Association for Computing Machin- ery.", |
|
"links": null |
|
}, |
|
"BIBREF72": { |
|
"ref_id": "b72", |
|
"title": "Huggingface's transformers: State-of-the-art natural language processing", |
|
"authors": [], |
|
"year": null, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huggingface's transformers: State-of-the-art natural language processing. ArXiv, abs/1910.03771.", |
|
"links": null |
|
}, |
|
"BIBREF73": { |
|
"ref_id": "b73", |
|
"title": "Learning to simplify sentences with quasi-synchronous grammar and integer programming", |
|
"authors": [ |
|
{ |
|
"first": "Kristian", |
|
"middle": [], |
|
"last": "Woodsend", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "409--420", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristian Woodsend and Mirella Lapata. 2011. Learn- ing to simplify sentences with quasi-synchronous grammar and integer programming. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 409-420, Edin- burgh, Scotland, UK. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF74": { |
|
"ref_id": "b74", |
|
"title": "Text rewriting improves semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Kristian", |
|
"middle": [], |
|
"last": "Woodsend", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 26th International Joint Conference on Artificial Intelligence, IJCAI'17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5095--5099", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristian Woodsend and Mirella Lapata. 2017. Text rewriting improves semantic role labeling. In Proceedings of the 26th International Joint Con- ference on Artificial Intelligence, IJCAI'17, page 5095-5099. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF75": { |
|
"ref_id": "b75", |
|
"title": "Sentence simplification by monolingual machine translation", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sander Wubben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Van Den", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Bosch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1015--1024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sander Wubben, Antal van den Bosch, and Emiel Krah- mer. 2012. Sentence simplification by monolingual machine translation. In Proceedings of the 50th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1015- 1024, Jeju Island, Korea. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF76": { |
|
"ref_id": "b76", |
|
"title": "Problems in current text simplification research: New data can help", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "283--297", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00139" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xu, Chris Callison-Burch, and Courtney Napoles. 2015. Problems in current text simplification re- search: New data can help. Transactions of the Asso- ciation for Computational Linguistics, 3:283-297.", |
|
"links": null |
|
}, |
|
"BIBREF77": { |
|
"ref_id": "b77", |
|
"title": "Optimizing statistical machine translation for text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quanze", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "401--415", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00107" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen, and Chris Callison-Burch. 2016a. Optimizing statistical machine translation for text simplification. Transactions of the Association for Computational Linguistics, 4:401-415.", |
|
"links": null |
|
}, |
|
"BIBREF78": { |
|
"ref_id": "b78", |
|
"title": "Optimizing statistical machine translation for text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quanze", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "401--415", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen, and Chris Callison-Burch. 2016b. Optimizing statistical machine translation for text simplification. Transactions of the Association for Computational Linguistics, 4:401-415.", |
|
"links": null |
|
}, |
|
"BIBREF79": { |
|
"ref_id": "b79", |
|
"title": "Sentence simplification with deep reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Xingxing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "584--594", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1062" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xingxing Zhang and Mirella Lapata. 2017. Sentence simplification with deep reinforcement learning. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 584-594, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF80": { |
|
"ref_id": "b80", |
|
"title": "A constrained sequenceto-sequence neural model for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Yaoyuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenxu", |
|
"middle": [], |
|
"last": "Ye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yansong", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yaoyuan Zhang, Zhenxu Ye, Yansong Feng, Dongyan Zhao, and Rui Yan. 2017. A constrained sequence- to-sequence neural model for sentence simplifica- tion. ArXiv, abs/1704.02312.", |
|
"links": null |
|
}, |
|
"BIBREF81": { |
|
"ref_id": "b81", |
|
"title": "Integrating transformer and paraphrase rules for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Sanqiang", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Meng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daqing", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andi", |
|
"middle": [], |
|
"last": "Saptono", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parmanto", |
|
"middle": [], |
|
"last": "Bambang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.11193" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sanqiang Zhao, Rui Meng, Daqing He, Saptono Andi, and Parmanto Bambang. 2018a. Integrating trans- former and paraphrase rules for sentence simplifica- tion. arXiv preprint arXiv:1810.11193.", |
|
"links": null |
|
}, |
|
"BIBREF82": { |
|
"ref_id": "b82", |
|
"title": "Integrating transformer and paraphrase rules for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Sanqiang", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Meng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daqing", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andi", |
|
"middle": [], |
|
"last": "Saptono", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bambang", |
|
"middle": [], |
|
"last": "Parmanto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3164--3173", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1355" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sanqiang Zhao, Rui Meng, Daqing He, Andi Saptono, and Bambang Parmanto. 2018b. Integrating trans- former and paraphrase rules for sentence simplifi- cation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3164-3173, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF83": { |
|
"ref_id": "b83", |
|
"title": "Semi-supervised text simplification with back-translation and asymmetric denoising autoencoders", |
|
"authors": [ |
|
{ |
|
"first": "Yanbin", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "The Thirty-Second Innovative Applications of Artificial Intelligence Conference", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "9668--9675", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanbin Zhao, Lu Chen, Zhi Chen, and Kai Yu. 2020a. Semi-supervised text simplification with back-translation and asymmetric denoising autoen- coders. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty- Second Innovative Applications of Artificial Intelli- gence Conference, IAAI 2020, The Tenth AAAI Sym- posium on Educational Advances in Artificial Intel- ligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pages 9668-9675. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF84": { |
|
"ref_id": "b84", |
|
"title": "Semi-supervised text simplification with back-translation and asymmetric denoising autoencoders", |
|
"authors": [ |
|
{ |
|
"first": "Yanbin", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9668--9675", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanbin Zhao, Lu Chen, Zhi Chen, and Kai Yu. 2020b. Semi-supervised text simplification with back-translation and asymmetric denoising autoen- coders. In AAAI, pages 9668-9675.", |
|
"links": null |
|
}, |
|
"BIBREF85": { |
|
"ref_id": "b85", |
|
"title": "A monolingual tree-based translation model for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Zhemin", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Delphine", |
|
"middle": [], |
|
"last": "Bernhard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 23rd International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1353--1361", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhemin Zhu, Delphine Bernhard, and Iryna Gurevych. 2010. A monolingual tree-based translation model for sentence simplification. In Proceedings of the 23rd International Conference on Computational Linguistics (Coling 2010), pages 1353-1361, Bei- jing, China. Coling 2010 Organizing Committee.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "recent works such as Malmi et al. (2019); Awasthi et al. (2019); Omelianchuk et al.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF2": { |
|
"text": "Dataset splits and sizes.", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"text": "Comparison of our system against recent stateof-the-art Neural Text Simplification models on Turk-Corpus, ASSET and WikiSmall test sets.", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"text": "Average SARI and FKGL scores (ASSET and TurkCorpus test sets)", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"text": "Examples of simplifications by TST", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |