|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:05:26.763035Z" |
|
}, |
|
"title": "Flesch-Kincaid is Not a Text Simplification Evaluation Metric", |
|
"authors": [ |
|
{ |
|
"first": "Teerapaun", |
|
"middle": [], |
|
"last": "Tanprasert", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Pomona College Claremont", |
|
"location": { |
|
"region": "CA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Kauchak", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Pomona College Claremont", |
|
"location": { |
|
"region": "CA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Sentence-level text simplification is evaluated using both automated metrics and human evaluation. For automatic evaluation, a combination of metrics is usually employed to evaluate different aspects of the simplification. Flesch-Kincaid Grade Level (FKGL) is one metric that has been regularly used to measure the readability of system output. In this paper, we argue that FKGL should not be used to evaluate text simplification systems. We provide experimental analyses on recent system output showing that the FKGL score can easily be manipulated to improve the score dramatically with only minor impact on other automated metrics (BLEU and SARI). Instead of using FKGL, we suggest that the component statistics, along with others, be used for posthoc analysis to understand system behavior.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Sentence-level text simplification is evaluated using both automated metrics and human evaluation. For automatic evaluation, a combination of metrics is usually employed to evaluate different aspects of the simplification. Flesch-Kincaid Grade Level (FKGL) is one metric that has been regularly used to measure the readability of system output. In this paper, we argue that FKGL should not be used to evaluate text simplification systems. We provide experimental analyses on recent system output showing that the FKGL score can easily be manipulated to improve the score dramatically with only minor impact on other automated metrics (BLEU and SARI). Instead of using FKGL, we suggest that the component statistics, along with others, be used for posthoc analysis to understand system behavior.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Critical to any application area is evaluation. Evaluation is often accomplished using one or more quantifiable evaluation metrics. Evaluation metrics are the main tool for comparing and analyzing approaches (Hossin and Sulaiman, 2015) and are often used to define whether progress is being made in a field. A good evaluation metric should be a proper measure of the quality of a particular algorithm and, importantly, should not be \"gameable\". Specifically, an approach should not be able to obtain a better score on the evaluation metric by manipulating the algorithm or output in ways that do not improve the actual quality of the output.", |
|
"cite_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 235, |
|
"text": "(Hossin and Sulaiman, 2015)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we examine evaluation for text simplification, specifically, sentence-level text simplification. Text simplification aims to transform text into a variant that is easier to understand by a broader range of people while retaining as much of the original content as possible. A range of approaches for text simplification have been pro-posed ranging from lexical simplification (Shardlow, 2014) , where only words and phrases are changed, to fully generative approaches that leverage models from machine translation (Coster and Kauchak, 2011a; Wubben et al., 2012) and recent sequential neural networks (Nisioi et al., 2017; Zhang and Lapata, 2017; Nishihara et al., 2019) . Text simplification evaluation has been done with two general approaches: human evaluation and automated metrics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 391, |
|
"end": 407, |
|
"text": "(Shardlow, 2014)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 529, |
|
"end": 556, |
|
"text": "(Coster and Kauchak, 2011a;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 577, |
|
"text": "Wubben et al., 2012)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 616, |
|
"end": 637, |
|
"text": "(Nisioi et al., 2017;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 638, |
|
"end": 661, |
|
"text": "Zhang and Lapata, 2017;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 662, |
|
"end": 685, |
|
"text": "Nishihara et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Human evaluation relies on annotators to judge the quality of the simplifications on three dimensions: fluency/grammaticality, how well the sentence represents fluent, grammatical text; adequacy, how well the content is preserved; and, simplicity, how simple the text is (Woodsend and Lapata, 2011) . The first two metrics were adapted from other text generation tasks (Knight and Marcu, 2002) with the addition of simplicity for text simplification. When human evaluation is used, these three metrics have been consistently employed. Human evaluations provide concrete analysis of text simplification systems along important dimensions, however, human evaluation is costly and is not practical for development, tuning, and other real-time uses. As such, text simplification has also relied on automated metrics for evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 271, |
|
"end": 298, |
|
"text": "(Woodsend and Lapata, 2011)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 393, |
|
"text": "(Knight and Marcu, 2002)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Automatic evaluation of text simplification has varied more across papers, though three metrics are most commonly employed: BLEU, SARI, and Flesch-Kincaid. BLEU (Papineni et al., 2001 ) compares the n-gram overlap via precision of a system simplification with a human reference simplification and was borrowed from machine translation. BLEU was the first metric suggested for text simplification that utilized reference simplifications (Zhu et al., 2010) , however, it focuses less on simplicity and more on fluency and content preservation. To counter this, SARI was proposed as an alternate metric (Xu et al., 2016) . SARI also compares against human references, but also utilizes the input sentence allowing it to better capture addition and deletion of information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 183, |
|
"text": "(Papineni et al., 2001", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 436, |
|
"end": 454, |
|
"text": "(Zhu et al., 2010)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 600, |
|
"end": 617, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Finally, a third automated metric that has been used to measure readability and fluency is Flesch-Kincaid Grade Level (FKGL) . FKGL was initially proposed in the 1940s (Flesch, 1948) and since then has been used extensively in the medical domain, though it has never been shown to affect actual comprehension (Shardlow, 2014; Kauchak and Leroy, 2016) . FKGL combines two text statistics to calculate the score: the average number of syllables per word and the average number of words per sentence:", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 124, |
|
"text": "Flesch-Kincaid Grade Level (FKGL)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 182, |
|
"text": "(Flesch, 1948)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 325, |
|
"text": "(Shardlow, 2014;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 350, |
|
"text": "Kauchak and Leroy, 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "F KGL = 0.39 N words N sentences +11.8 N syllables N words \u221215.59", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In recent text simplification papers, both BLEU and SARI are common evaluation metrics (Vu et al., 2018; Guo et al., 2018; Scarton and Specia, 2018; Qiang, 2018; Niklaus et al., 2019; Nishihara et al., 2019) . FKGL is not as popular as it was before SARI was introduced, but it continues to be used as an evaluation metric in recent papers (Xu et al., 2016; Zhang and Lapata, 2017; Guo et al., 2018; Qiang, 2018; Scarton and Specia, 2018; Nassar et al., 2019; Nishihara et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 104, |
|
"text": "(Vu et al., 2018;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 105, |
|
"end": 122, |
|
"text": "Guo et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 148, |
|
"text": "Scarton and Specia, 2018;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 161, |
|
"text": "Qiang, 2018;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 183, |
|
"text": "Niklaus et al., 2019;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 207, |
|
"text": "Nishihara et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 357, |
|
"text": "(Xu et al., 2016;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 358, |
|
"end": 381, |
|
"text": "Zhang and Lapata, 2017;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 399, |
|
"text": "Guo et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 412, |
|
"text": "Qiang, 2018;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 413, |
|
"end": 438, |
|
"text": "Scarton and Specia, 2018;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 459, |
|
"text": "Nassar et al., 2019;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 460, |
|
"end": 483, |
|
"text": "Nishihara et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we argue that FKGL is not a proper evaluation metric for text simplification and should not be used to evaluate text simplification systems, i.e., alongside other metrics like BLEU and SARI. FKGL was one of the first metrics suggested for text simplification (Zhu et al., 2010) and has been used by many as an evaluation metric to compare systems. However, FKGL was not originally designed to evaluate system output (it was designed to measure human output) and, because of its simplistic nature, is very easy to game, either explicitly (as we do in this paper) or implicitly by certain model biases (e.g., text simplification algorithms that split sentences will tend to have better FKGL scores). Recent work has shown that systems with good FKGL scores are not necessarily correlated with high-quality simplifications (Martin et al., 2018; Alva-Manchego et al., 2020) , however, this is the first in-depth analysis of the FKGL metric for evaluation and where specific system transformations are analyzed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 292, |
|
"text": "(Zhu et al., 2010)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 835, |
|
"end": 856, |
|
"text": "(Martin et al., 2018;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 857, |
|
"end": 884, |
|
"text": "Alva-Manchego et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To explore how FKGL can be manipulated, we introduce six simple methods for modifying system output and examine the impact these modifications have on automated evaluation metrics. The modifications could be made explicitly by a system in an attempt to improve their score, or, more worrisome, implicitly. In addition to the FKGL scores, we also present and discuss how BLEU and SARI respond to the modifications. We show that with some very minor modifications, FKGL can be improved dramatically with minimal effect on the other two evaluation metrics. We conclude with some recommendations on how to incorporate FKGL-like metrics into text simplification analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The earliest version of the Flesch-Kincaid readability formula appears in Flesch's doctoral dissertation (Flesch, 1943) and calculated based on the the average number of words per sentence, the number of affixes, and the number of references to people. The formula was derived based on the McCall-Crabbs Standard Test Lessons in Reading (McCall and Crabbs, 1926) , a standardized test given to children in grades 3-7. The McCall-Crabbs tests contains 376 passages with 8 reading comprehensive questions per passage. Each lesson is labeled with its difficulty as a grade level. Based on these texts, Flesch developed the formula to predict the grade of children in grades 3-7 who answered at least 75% of the questions correctly about a given passage. The original goal of the formula was to help students track their progress.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 119, |
|
"text": "(Flesch, 1943)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 362, |
|
"text": "(McCall and Crabbs, 1926)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "History of Flesch-Kincaid", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Five years later, he published a new formula: the Reading Ease Score (Flesch, 1948) . He adjusted the original formula by recomputing the coefficients and replacing previous text measurements with the ones used today, the average number of syllables and the average sentences length. Like the original study, this new formula was validated with children and was based on the same criterion, McCall-Crabbs Standard Test Lessons in Reading.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 83, |
|
"text": "(Flesch, 1948)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "History of Flesch-Kincaid", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Flesch-Kincaid Grade Level is a variation of the Reading Ease formula with readjusted weights and is the formula that has been commonly used in text simplification evaluation. The formula was derived three decades later (Kincaid et al., 1975) specifically to evaluate the readability of technical materials for military personnel. 531 Navy personnel in four technical training schools at Navy bases were tested for their reading comprehension level according to the comprehension section of the Gates-McGinitie reading test as well as their comprehension of 18 passages from Rate Training Manuals. Despite the fact that this formula was derived from Navy personnel, with military-based material, and specifically for Navy use, it has been broadly used in a range of settings to evaluate the readability of text, for example, it is commonly used to guide text generation by medical writers in the medical domain and even Microsoft Word includes both the Flesch Reading Ease and FKGL scores (Shedlosky-Shoemaker et al., 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 242, |
|
"text": "(Kincaid et al., 1975)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 989, |
|
"end": 1023, |
|
"text": "(Shedlosky-Shoemaker et al., 2009)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "History of Flesch-Kincaid", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We provide this background to raise some concerns based on its origins for its application for text simplification evaluation. The inputs of the formula -sentence count, word count, and syllable count -were decided based on a study in the 1940s where modern text analysis tools were not available. Both the Flesch Reading Ease and FKGL scores were developed based on very specific corpora and very targeted populations, children grades 3-7 in the former case and Navy personnel in the latter case. Most importantly, the text passages used to collect data were always written by people and assumed to be mostly free of errors in terms of writing. These assumptions cannot be made for text generated by automated systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "History of Flesch-Kincaid", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "One of the main drawbacks of the FKGL metric is that the formula is based on fairly simplistic text statistics. Because of this, it is straightforward to manipulate the output of a text simplification to artificially improve the FKGL score. We suggest six approaches to modify the output of an automatically simplified text that aim to manipulate these statistics. We view the modifications as an explicit post-processing step, however, many of them could be incorporated into a text simplification system either explicitly as a way to improve the score, or implicitly as a side-effect of the algorithm used (e.g., sentence splitting). Each approach suggested modifies the output text on a sentence level. In the analyses we consider the effect of applying each approach to varying proportions of the sentences output by the system. random-period: Randomly insert a period into the sentence. Adding a period to the sentence splits the sentence into two sentences which reduces the average number of words per sentence. random-the: Randomly insert the word \"the\" into the sentence. This adds a short and very common word to reduce the average syllable count per word while minimizing the impact on the meaning. replace-longest: Replace the longest word in the sentence (by character count) with the word \"the\". Assuming that the number of characters in a word positively correlates with the number of syllables, replacing the longest word with \"the\" should reduce the average syllable count per word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modifying Text Simplification Output", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "replace-rand-period: Replace a random word with a period in the sentence. This is similar to random-period, but additionally removes a random word to reduce the number of words per sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modifying Text Simplification Output", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "replace-rand-the: Replace a random word with \"the\": imitates random-the, but additionally removes a random word to reduce the number of words per sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modifying Text Simplification Output", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "rand-period+ repl-longest: combine random-period and replace-longest to magnify the effects on FKGL.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modifying Text Simplification Output", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To understand the problems with FKGL, we analyzed the output from the five text simplification systems examined by Zhang and Lapata (2017) , a number of which are state-of-the-art: PBMT-R (Wubben et al., 2012), a phrase-based approach based on statistical MT; Hybrid (Narayan and Gardent, 2014), a model that combines sentence splitting and deletion with PBMT-R; EncDecA, a basic neural encoder-decoder model with attention; and two deep reinforcement learning models, Dress and Dress-Ls (Zhang and Lapata, 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 138, |
|
"text": "Zhang and Lapata (2017)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 512, |
|
"text": "(Zhang and Lapata, 2017)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "There are two main corpora that are used to train and evaluate text simplification systems: Wikipedia (Zhu et al., 2010; Coster and Kauchak, 2011b) , which consists of automatically aligned sentences between English Wikipedia and Simple English Wikipedia, and Newsela (Xu et al., 2015) , which consists of news articles manually simplified at varying levels of simplicity. We present the results for the Newsela corpus since it involves explicit human simplification and has been shown to be less noisy than the Wikipedia corpus (Xu et al., 2015) . We also conducted the experimental analysis on the Wikipedia corpus and saw similar results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 120, |
|
"text": "(Zhu et al., 2010;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 147, |
|
"text": "Coster and Kauchak, 2011b)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 285, |
|
"text": "(Xu et al., 2015)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 529, |
|
"end": 546, |
|
"text": "(Xu et al., 2015)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We applied each of the modification techniques to a varied percentage of output sentences, from 10% to 100% in increments of 10%, for the five text simplification systems. The sentences to be modified were randomly selected from the system output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We calculated FKGL 1 as well as BLEU (Papineni et al., 2001 ) and SARI 2 (Xu et al., 2016) to observe how the modifications affect other common text simplification evaluation metrics. To account for per-sentence variation and randomness in some of the modification approaches, we repeated the experiments 100 times and averaged the results. Figure 1 shows the trends of the effect that the modification approaches have on FKGL for Dress-Ls, and Table 1 presents more detailed experimental results for the three best performing systems (Dress-Ls, EncDecA, and Hybrid). The three methods that involve sentence splitting result in aggressive improvements in the FKGL score; replacing the longest word shows some improvement; and the other two approaches involving \"the\" have minimal effect. In the most extreme case, rand-period+ repl-longest reduces the FKGL score to almost zero when applied to all of the sentences. With simple post-processing applied to the output, a text simplification approach can achieve an arbitrarily low FKGL score. Figures 2 and 3 show the effect that the modification approaches have on the BLEU and SARI scores for Dress-Ls. There is virtually no effect on the SARI scores by any of the modification techniques and none of the approaches change the score by more than 0.004, regardless of percentage of sentences modified. BLEU, on the other hand, does register some differences for the modified output. rand-period+ repl-longest has the most drastic effect and, in the most extreme case, for Dress-Ls it reduces the BLEU score from 0.2374 to 0.1710 when it is applied to all sentences. The other five modification techniques have more minor effects, e.g., random-period drops the score to 0.1953, when applied to all sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 59, |
|
"text": "(Papineni et al., 2001", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 73, |
|
"end": 90, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 349, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 452, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1041, |
|
"end": 1056, |
|
"text": "Figures 2 and 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Using multiple evaluation metrics partially mitigates the gameability of FKGL since BLEU is affected. However, the effect on BLEU is significantly smaller than the effect on FKGL. While the Dress-Ls system did originally have the highest BLEU and SARI scores, it did not have the highest FKGL score. However, if we randomly inserted a period into just 10% of the sentences of the Dress-Ls output, the FKGL score would improve to 4.543, the BLEU score would drop slightly to 0.233 and there is no significant change in SARI score. After the transformation, the system would still be the best performing model with respect to BLEU and SARI, but now it would also be the best performing model with respect to FKGL. With a simple modification to the system output, the best performing model could be changed with respect to FKGL without affecting the other two metrics significantly. 1 https://github.com/mmautner/readability 2 We used the implementation for BLEU and SARI from the Joshua Simplification System.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For the sake of brevity, we only include detailed experimental analysis of the output of Dress-Ls, however, the results were similar across all systems 3 . To provide some additional examples, Table 1 shows the FKGL, BLEU, and SARI scores for Dress-Ls, EncDecA, and Hybrid where 10%, 50%, and 100% of the sentences were modified. We chose EncDecA and Hybrid as additional systems to include since they performed well on at least one of the automated metrics and represent fairly different approaches to the text simplification problem. The trends seen for Dress-Ls are also seen with the other two systems: FKGL can be aggressively improved, BLEU is slightly impacted, and SARI is not affected. Regardless of the type of system, because of the simplicity of FKGL, the results can be arbitrarily improved.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "3 Complete experimental results are included in the appendix.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Although the focus of this paper was on FKGL, we also analyzed BLEU and SARI further to understand why the modification approaches affected those metrics. The BLEU score is calculated as the average of the n-gram precisions of size 1 to 4, where precision is the proportion of n-grams in the system output that are found in the corresponding reference simplification. The SARI score is an average of F1 scores based on three operations relative to the reference text: added n-grams, kept n-grams, and deleted n-grams. Table 2 shows each of the individual component calculations for the Dress-Ls system when the six modifications are applied to 100% of the sentences. Since the approaches rely on randomization, the results shown are an average of 100 trials. For conciseness, we only include the results for Dress-Ls, though all systems showed very similar trends. Full results, including 2-gram and 3-gram F1 and precision scores for SARI, for all systems are provided in the appendix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 518, |
|
"end": 525, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Understanding BLEU and SARI", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "For BLEU, all levels of precision drop for all three modification approaches. The 1-gram precision is the least affected, while larger n-gram precisions show increasingly larger effects. This intuitively makes sense since randomly inserting/replacing a word in an originally correct sequence of words should affect multiple n-grams of larger size. None of the decreases are large in magnitude, but they are all in the same direction and contribute to the slight drop in BLEU scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Understanding BLEU and SARI", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "For SARI, at the 1-gram level, the Add F1 score actually improves for both random-the and replace-longest since they add a common word (\"the\") that has a high likelihood of matching with a word in the reference simplification. However, for longer n-grams the Add F1 score drops for similar reasons to the BLEU score precisions drop. Besides the Add F1 score, however, the other scores remain virtually unchanged. In aggregate, the Add effect tends to balance out between increases in smaller n-grams and decreases in larger n-grams and because the other components do not change much, the overall SARI score remains unaffected.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Understanding BLEU and SARI", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The effects of the modifications on BLEU and SARI are minimal, especially compared to the effects on FKGL. While this helps illustrate how a manipulation of FKGL could be done, it does not necessarily imply that BLEU and SARI are sufficiently reliable. Even though both metrics are relatively resilient against our modification approaches, these approaches were designed specifically to manipulate the FKGL score and, thus, do not serve as evidence against the concerns that have been raised about their robustness (Callison-Burch et al., 2006; Sulem et al., 2018) . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 515, |
|
"end": 544, |
|
"text": "(Callison-Burch et al., 2006;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 564, |
|
"text": "Sulem et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Understanding BLEU and SARI", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "FKGL should not be used as an evaluation metric. Instead, it can be used for post-hoc analysis to understand the behavior of the systems. Even better, rather than reporting the FKGL score, which can be affected by multiple types of changes in the system, papers can report the individual components of FKGL, i.e., the average sentence length and the average number of syllables. This demystifies the readability score and provides concrete information about the types of changes that are being made by the systems. A comparative analysis of 30 metrics showed that these features are better correlated with human judgement than FKGL (Martin et al., 2018) , and some recent papers have reported the average sentence length statistic already (Kriz et al., 2019; Kumar et al., 2020; Maddela et al., 2021) . These two metrics can be supplemented with other corpus statistics that also help understand what changes the systems are making, e.g., the proportion of sentences that are split. Table 3 shows these three statistics for the five text simplification approaches. These statistics allow for a concrete analysis of what the different approaches are doing. All the models reduce the sentence length, except for PBMT-R. Hybrid is the most aggressive at creating short sentences, though it does not do any sentence splitting, so it accomplishes this through deletion, which may explain the low BLEU score. All of the models are selecting words with fewer syllables, except for Hybrid. Finally, all models except Hybrid are doing sentence splitting, with the EncDecA doing the least splitting. These statistics paint a much more vivid picture of what the different approaches are doing than a single readability score.", |
|
"cite_spans": [ |
|
{ |
|
"start": 632, |
|
"end": 653, |
|
"text": "(Martin et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 739, |
|
"end": 758, |
|
"text": "(Kriz et al., 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 759, |
|
"end": 778, |
|
"text": "Kumar et al., 2020;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 779, |
|
"end": 800, |
|
"text": "Maddela et al., 2021)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 983, |
|
"end": 990, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Better Approach", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this paper, we have provided an experimental analysis of the FKGL score on state-of-the-art text simplification systems. We find that very basic postprocessing techniques can drastically improve the FKGL score of a system with negligible effects on two other metrics, BLEU and SARI. Based on these findings, we argue that FKGL should no longer be used as a text simplification evaluation metric. Instead, the components of FKGL and other related statistics should be used to help understand what different systems are doing. If this analysis is not compelling enough and FKGL continues to be used, then we propose concrete methods for improving FKGL, with minimal work and only minor effects on the other automated metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": " Tables 4-8 show the complete FKGL, BLEU and SARI scores for the modified outputs of all five systems: Dress-Ls, EncDecA, Hybrid, Dress and PBMT-R. B BLEU n-gram Score Breakdown Table 9 shows the precision scores for the individual n-grams (1-4) of the unmodified system output and output with all sentences modified (100%) for each of the six modification approaches on outputs of all five systems.C SARI n-gram Score Breakdown Table 10 shows the SARI component scores for the unmodified system output and with all sentences modified (100%) for each of the six modification approaches on all five systems. Table 10 : SARI score breakdown (F1 and precision scores used in the score calculation for 1-, 2-, 3-and 4-gram) for all combination of systems and modification approaches (long table spanning two pages)", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1, |
|
"end": 11, |
|
"text": "Tables 4-8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 185, |
|
"text": "Table 9", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 429, |
|
"end": 437, |
|
"text": "Table 10", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 615, |
|
"text": "Table 10", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Experimental Results for All Systems", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Approach", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dress-Ls", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Data-driven sentence simplification: Survey and benchmark", |
|
"authors": [ |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Alva-Manchego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Computational Linguistics", |
|
"volume": "46", |
|
"issue": "1", |
|
"pages": "135--187", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fernando Alva-Manchego, Carolina Scarton, and Lu- cia Specia. 2020. Data-driven sentence simplifica- tion: Survey and benchmark. Computational Lin- guistics, 46(1):135-187.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Re-evaluation the role of bleu in machine translation research", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Osborne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of European Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Callison-Burch, Miles Osborne, and Philipp Koehn. 2006. Re-evaluation the role of bleu in ma- chine translation research. In Proceedings of Euro- pean Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning to simplify sentences using wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Coster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Kauchak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the workshop on monolingual text-totext generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Coster and David Kauchak. 2011a. Learn- ing to simplify sentences using wikipedia. In Pro- ceedings of the workshop on monolingual text-to- text generation.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Simple english wikipedia: a new text simplification task", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Coster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Kauchak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of Assication for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Coster and David Kauchak. 2011b. Simple english wikipedia: a new text simplification task. In Proceedings of Assication for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Marks of readable style; a study in adult education. Teachers College Contributions to Education", |
|
"authors": [ |
|
{ |
|
"first": "Rudolf", |
|
"middle": [], |
|
"last": "Flesch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1943, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rudolf Flesch. 1943. Marks of readable style; a study in adult education. Teachers College Contributions to Education.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A new readability yardstick", |
|
"authors": [ |
|
{ |
|
"first": "Rudolph", |
|
"middle": [], |
|
"last": "Flesch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1948, |
|
"venue": "Journal of applied psychology", |
|
"volume": "32", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rudolph Flesch. 1948. A new readability yardstick. Journal of applied psychology, 32(3):221.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Dynamic multi-level multi-task learning for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Han", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramakanth", |
|
"middle": [], |
|
"last": "Pasunuru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "462--476", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Han Guo, Ramakanth Pasunuru, and Mohit Bansal. 2018. Dynamic multi-level multi-task learning for sentence simplification. In Proceedings of Inter- national Conference on Computational Linguistics, pages 462-476.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A review on evaluation metrics for data classification evaluations", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Hossin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sulaiman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Journal of Data Mining & Knowledge Management Process", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Hossin and MN Sulaiman. 2015. A re- view on evaluation metrics for data classification evaluations. International Journal of Data Mining & Knowledge Management Process.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Moving beyond readability metrics for health-related text simplification. IT professional", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Kauchak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gondy", |
|
"middle": [], |
|
"last": "Leroy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "18", |
|
"issue": "", |
|
"pages": "45--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Kauchak and Gondy Leroy. 2016. Moving be- yond readability metrics for health-related text sim- plification. IT professional, 18(3):45-51.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Derivation of new readability formulas (automated readability index, fog count and flesch reading ease formula) for navy enlisted personnel", |
|
"authors": [ |
|
{ |
|
"first": "Robert P Fishburne", |
|
"middle": [], |
|
"last": "Peter Kincaid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Jr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brad", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Rogers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Chissom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1975, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J Peter Kincaid, Robert P Fishburne Jr, Richard L Rogers, and Brad S Chissom. 1975. Derivation of new readability formulas (automated readability in- dex, fog count and flesch reading ease formula) for navy enlisted personnel.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Summarization beyond sentence extraction: A probabilistic approach to sentence compression", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Artificial Intelligence", |
|
"volume": "139", |
|
"issue": "1", |
|
"pages": "91--107", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Knight and Daniel Marcu. 2002. Summariza- tion beyond sentence extraction: A probabilistic ap- proach to sentence compression. Artificial Intelli- gence, 139(1):91-107.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Complexity-weighted loss and diverse reranking for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Reno", |
|
"middle": [], |
|
"last": "Kriz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joao", |
|
"middle": [], |
|
"last": "Sedoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marianna", |
|
"middle": [], |
|
"last": "Apidianaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Miltsakaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3137--3147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reno Kriz, Joao Sedoc, Marianna Apidianaki, Carolina Zheng, Gaurav Kumar, Eleni Miltsakaki, and Chris Callison-Burch. 2019. Complexity-weighted loss and diverse reranking for sentence simplification. In Proceedings of NAACL-HLT, pages 3137-3147.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Iterative edit-based unsupervised sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Mou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Golab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Vechtomova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7918--7928", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dhruv Kumar, Lili Mou, Lukasz Golab, and Olga Vech- tomova. 2020. Iterative edit-based unsupervised sen- tence simplification. In Proceedings of Association for Computational Linguistics, pages 7918-7928.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Controllable text simplification with explicit paraphrasing", |
|
"authors": [ |
|
{ |
|
"first": "Mounica", |
|
"middle": [], |
|
"last": "Maddela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Alva-Manchego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "NAACL-HLT, Online. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mounica Maddela, Fernando Alva-Manchego, and Wei Xu. 2021. Controllable text simplification with ex- plicit paraphrasing. In NAACL-HLT, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "\u00c9ric Villemonte de la Clergerie, Antoine Bordes, and Beno\u00eet Sagot", |
|
"authors": [ |
|
{ |
|
"first": "Louis", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Humeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre-Emmanuel", |
|
"middle": [], |
|
"last": "Mazare", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Workshop on Automatic Text Adaptation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "29--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Louis Martin, Samuel Humeau, Pierre-Emmanuel Mazare,\u00c9ric Villemonte de la Clergerie, Antoine Bordes, and Beno\u00eet Sagot. 2018. Reference-less quality estimation of text simplification systems. In Proceedings of the Workshop on Automatic Text Adaptation, pages 29-38.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Teachers College", |
|
"authors": [ |
|
{ |
|
"first": "Anderson", |
|
"middle": [], |
|
"last": "William", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lelah", |
|
"middle": [ |
|
"Mae" |
|
], |
|
"last": "Mccall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Crabbs", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1926, |
|
"venue": "Standard Test Lessons in Reading", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Anderson McCall and Lelah Mae Crabbs. 1926. Standard Test Lessons in Reading... 5. Teach- ers College, Columbia University, Bureau of Publi- cations.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Hybrid simplification using deep semantics and machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shashi Narayan and Claire Gardent. 2014. Hybrid sim- plification using deep semantics and machine trans- lation. In Proceedings Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Neural versus non-neural text simplification: A case study", |
|
"authors": [ |
|
{ |
|
"first": "Michelle", |
|
"middle": [], |
|
"last": "Islam Nassar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Ananda-Rajah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Workshop of the Australasian Language Technology Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "172--177", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Islam Nassar, Michelle Ananda-Rajah, and Gholam- reza Haffari. 2019. Neural versus non-neural text simplification: A case study. In Proceedings of the Workshop of the Australasian Language Technology Association, pages 172-177.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Transforming complex sentences into a semantic hierarchy", |
|
"authors": [ |
|
{ |
|
"first": "Christina", |
|
"middle": [], |
|
"last": "Niklaus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Cetto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3415--3427", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christina Niklaus, Matthias Cetto, Andr\u00e9 Freitas, and Siegfried Handschuh. 2019. Transforming complex sentences into a semantic hierarchy. In Proceedings of Association for Computational Linguistics, pages 3415-3427.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Controllable text simplification with lexical constraint loss", |
|
"authors": [ |
|
{ |
|
"first": "Daiki", |
|
"middle": [], |
|
"last": "Nishihara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoyuki", |
|
"middle": [], |
|
"last": "Kajiwara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuki", |
|
"middle": [], |
|
"last": "Arase", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of Association for Computational Linguistics: Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "260--266", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daiki Nishihara, Tomoyuki Kajiwara, and Yuki Arase. 2019. Controllable text simplification with lexical constraint loss. In Proceedings of Association for Computational Linguistics: Student Research Work- shop, pages 260-266.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Exploring neural text simplification models", |
|
"authors": [ |
|
{ |
|
"first": "Sergiu", |
|
"middle": [], |
|
"last": "Nisioi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Sanja\u0161tajner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liviu P", |
|
"middle": [], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dinu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "85--91", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sergiu Nisioi, Sanja\u0160tajner, Simone Paolo Ponzetto, and Liviu P Dinu. 2017. Exploring neural text sim- plification models. In Proceedings of Association for Computational Linguistics, pages 85-91.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics -ACL", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2001. BLEU. In Proceedings of the 40th Annual Meeting on Association for Computational Linguistics -ACL. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Improving neural text simplification model with simplified corpora", |
|
"authors": [ |
|
{ |
|
"first": "Jipeng", |
|
"middle": [], |
|
"last": "Qiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jipeng Qiang. 2018. Improving neural text simpli- fication model with simplified corpora. CoRR, abs/1810.04428.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Learning simplifications for specific target audiences", |
|
"authors": [ |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "712--718", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carolina Scarton and Lucia Specia. 2018. Learning simplifications for specific target audiences. In Pro- ceedings of Association for Computational Linguis- tics, pages 712-718.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A survey of automated text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Shardlow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Journal of Advanced Computer Science and Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Shardlow. 2014. A survey of automated text simplification. International Journal of Advanced Computer Science and Applications.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Tools for assessing readability and quality of health-related web sites", |
|
"authors": [ |
|
{ |
|
"first": "Randi", |
|
"middle": [], |
|
"last": "Shedlosky-Shoemaker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amy", |
|
"middle": [ |
|
"Curry" |
|
], |
|
"last": "Sturm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muniba", |
|
"middle": [], |
|
"last": "Saleem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kimberly", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Kelly", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Journal of genetic counseling", |
|
"volume": "18", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Randi Shedlosky-Shoemaker, Amy Curry Sturm, Mu- niba Saleem, and Kimberly M Kelly. 2009. Tools for assessing readability and quality of health-related web sites. Journal of genetic counseling, 18(1):49.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Bleu is not suitable for the evaluation of text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Elior", |
|
"middle": [], |
|
"last": "Sulem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omri", |
|
"middle": [], |
|
"last": "Abend", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Rappoport", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "738--744", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elior Sulem, Omri Abend, and Ari Rappoport. 2018. Bleu is not suitable for the evaluation of text sim- plification. In Proceedings of Empirical Methods in Natural Language Processing, pages 738-744.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Sentence simplification with memoryaugmented neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Tu", |
|
"middle": [], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baotian", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsendsuren", |
|
"middle": [], |
|
"last": "Munkhdalai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "79--85", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tu Vu, Baotian Hu, Tsendsuren Munkhdalai, and Hong Yu. 2018. Sentence simplification with memory- augmented neural networks. In Proceedings of North American Chapter of the Association for Com- putational Linguistics: Human Language Technolo- gies, pages 79-85.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Learning to simplify sentences with quasi-synchronous grammar and integer programming", |
|
"authors": [ |
|
{ |
|
"first": "Kristian", |
|
"middle": [], |
|
"last": "Woodsend", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "409--420", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristian Woodsend and Mirella Lapata. 2011. Learn- ing to simplify sentences with quasi-synchronous grammar and integer programming. In Proceedings of Empirical Methods in Natural Language Process- ing, pages 409-420.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Sentence simplification by monolingual machine translation", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sander Wubben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Van Den", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Bosch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1015--1024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sander Wubben, Antal van den Bosch, and Emiel Krah- mer. 2012. Sentence simplification by monolingual machine translation. In Proceedings of the 50th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1015- 1024.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Problems in current text simplification research: New data can help. Transactions of the Association of Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xu, Chris Callison-Burch, and Courtney Napoles. 2015. Problems in current text simplification re- search: New data can help. Transactions of the As- sociation of Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Optimizing statistical machine translation for text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quanze", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "401--415", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen, and Chris Callison-Burch. 2016. Optimizing statistical machine translation for text simplification. Transactions of the Association for Computational Linguistics, 4:401-415.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Sentence simplification with deep reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Xingxing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "584--594", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xingxing Zhang and Mirella Lapata. 2017. Sentence simplification with deep reinforcement learning. In Proceedings of Empirical Methods in Natural Lan- guage Processing, pages 584-594.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "A monolingual tree-based translation model for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Zhemin", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Delphine", |
|
"middle": [], |
|
"last": "Bernhard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of ICCL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhemin Zhu, Delphine Bernhard, and Iryna Gurevych. 2010. A monolingual tree-based translation model for sentence simplification. In Proceedings of ICCL.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "FKGL scores (smaller is better) from the experiments on the Dress-Ls test output," |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "BLEU scores (larger is better) from the experiments on the Dress-Ls test output, averaged over 100 runs." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "SARI scores (larger is better) from the experiments on the Dress-Ls test output, averaged over 100 runs." |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Experimental results (FKGL, BLEU and SARI scores) for 10%, 50% and 100% of the sentences being modified on three systems: Dress-Ls, EncDecA and Hybrid.", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Breakdown of the components making up BLEU and SARI scores for the original Dress-Ls output and the modified texts.", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Post-hoc statistics for original and reference data from the test corpus and five system outputs.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |