|
{ |
|
"paper_id": "C18-1034", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:10:54.646354Z" |
|
}, |
|
"title": "A Nontrivial Sentence Corpus for the Task of Sentence Readability Assessment in Portuguese", |
|
"authors": [ |
|
{ |
|
"first": "Sidney", |
|
"middle": [ |
|
"Evaldo" |
|
], |
|
"last": "Leal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of S\u00e3o Paulo Av. do Trabalhador Saocarlense", |
|
"location": { |
|
"postCode": "400", |
|
"settlement": "S\u00e3o Carlos -SP", |
|
"country": "Brazil" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Magali", |
|
"middle": [], |
|
"last": "Sanches Duran", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of S\u00e3o Paulo Av. do Trabalhador Saocarlense", |
|
"location": { |
|
"postCode": "400", |
|
"settlement": "S\u00e3o Carlos -SP", |
|
"country": "Brazil" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"Maria" |
|
], |
|
"last": "Alu\u00edsio", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of S\u00e3o Paulo Av. do Trabalhador Saocarlense", |
|
"location": { |
|
"postCode": "400", |
|
"settlement": "S\u00e3o Carlos -SP", |
|
"country": "Brazil" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Effective textual communication depends on readers being proficient enough to comprehend texts, and texts being clear enough to be understood by the intended audience, in a reading task. When the meaning of textual information and instructions is not well conveyed, many losses and damages may occur. Among the solutions to alleviate this problem is the automatic evaluation of sentence readability, task which has been receiving a lot of attention due to its large applicability. However, a shortage of resources, such as corpora for training and evaluation, hinders the full development of this task. In this paper, we generate a nontrivial sentence corpus in Portuguese. We evaluate three scenarios for building it, taking advantage of a parallel corpus of simplification, in which each sentence triplet is aligned and has simplification operations annotated, being ideal for justifying possible mistakes of future methods. The best scenario of our corpus PorSimplesSent is composed of 4,888 pairs, which is bigger than a similar corpus for English; all the three versions of it are publicly available. We created four baselines for PorSimplesSent and made available a pairwise ranking method, using 17 linguistic and psycholinguistic features, which correctly identifies the ranking of sentence pairs with an accuracy of 74.2%.", |
|
"pdf_parse": { |
|
"paper_id": "C18-1034", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Effective textual communication depends on readers being proficient enough to comprehend texts, and texts being clear enough to be understood by the intended audience, in a reading task. When the meaning of textual information and instructions is not well conveyed, many losses and damages may occur. Among the solutions to alleviate this problem is the automatic evaluation of sentence readability, task which has been receiving a lot of attention due to its large applicability. However, a shortage of resources, such as corpora for training and evaluation, hinders the full development of this task. In this paper, we generate a nontrivial sentence corpus in Portuguese. We evaluate three scenarios for building it, taking advantage of a parallel corpus of simplification, in which each sentence triplet is aligned and has simplification operations annotated, being ideal for justifying possible mistakes of future methods. The best scenario of our corpus PorSimplesSent is composed of 4,888 pairs, which is bigger than a similar corpus for English; all the three versions of it are publicly available. We created four baselines for PorSimplesSent and made available a pairwise ranking method, using 17 linguistic and psycholinguistic features, which correctly identifies the ranking of sentence pairs with an accuracy of 74.2%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Um Corpus N\u00e3o Trivial de Senten\u00e7as para a Tarefa de Avalia\u00e7\u00e3o de Complexidade Sentencial em Portugu\u00eas Uma comunica\u00e7\u00e3o textual eficaz depende de os leitores serem proficientes o suficiente para compreenderem o texto e de o texto ser claro o suficiente para ser compreendido pelo p\u00fablico-alvo, em uma tarefa de leitura. Quando o significado das informa\u00e7\u00f5es e instru\u00e7\u00f5es textuais n\u00e3o \u00e9 bem transmitido, muitas perdas e danos podem ocorrer. Entre as solu\u00e7\u00f5es para aliviar este problema est\u00e1 a avalia\u00e7\u00e3o autom\u00e1tica da complexidade sentencial, tarefa que vem recebendo muita aten\u00e7\u00e3o devido a sua grande aplicabilidade. No entanto, a escassez de recursos, como corpora para treinamento e avalia\u00e7\u00e3o, dificulta o pleno desenvolvimento dessa tarefa. Neste artigo, geramos um corpus de senten\u00e7as n\u00e3o triviais em Portugu\u00eas. Avaliamos tr\u00eas cen\u00e1rios para constru\u00ed-lo, aproveitando um corpus paralelo de simplifica\u00e7\u00e3o textual, no qual cada trio de senten\u00e7as est\u00e1 alinhado e possui opera\u00e7\u00f5es de simplifica\u00e7\u00e3o anotadas, sendo ideal para justificar poss\u00edveis erros de m\u00e9todos futuros. O nosso melhor cen\u00e1rio do corpus PorSimplesSent \u00e9 composto por 4.888 pares, que \u00e9 maior que um corpus similar para o ingl\u00eas; todas as tr\u00eas vers\u00f5es do corpus PorSimplesSent est\u00e3o disponibilizadas publicamente. Criamos quatro m\u00e9tricas baselines para o PorSimplesSent e um m\u00e9todo de ranqueamento por pares, utilizando 17 m\u00e9tricas lingu\u00edsticas e psicolingu\u00edsticas, que identificam corretamente o ranqueamento dos pares de senten\u00e7as com uma acur\u00e1cia de 74.2%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Title and Abstract in Portuguese", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Readability is an issue of great social and economic impact. Effective textual communication depends on readers being proficient enough to comprehend texts, and texts being clear enough to be understood by the intended audience. When the meaning of textual information and instructions is not well conveyed, many losses and damages may occur (Dubay, 2007) . In Brazil, for example, only 8% of adult population has reading proficiency (IPM, 2016) . The situation is worse in the agriculture and livestock sectors, where only 1% of the surveyed are proficient readers. For this reason, most of rural producers do not have access to new technologies, undermining the development of agribusiness, which accounts for 22% of gross internal product and 30% of Brazilian jobs 1 . Research investments in these sectors, therefore, do not cause as much impact as they potentially might. Identifying which sentences of a text are more complex may help writers of newsletters, manuals and instructions, for example, to adequate their texts to their audiences. Among the solutions to alleviate this problem is the simplification or adaptation of complex texts, a task that has been partially or fully automatized by Natural Language Processing (NLP) applications. For Brazilian Portuguese, various applications, methods and resources aiming to support simplification in several levels of readability were developed in the Project PorSimples . Among these resources there is a parallel and aligned corpus with two levels of simplification and annotated simplification operations (Caseli et al., 2009) . PorSimples corpus has been used to train readability classifiers for texts (Scarton et al., 2010) . Table 1 shows examples of an original sentence of PorSimples corpus (O), its natural simplification (N) and its strong simplification (S). The natural simplification had a substitution of \"Uma parcela\" by \"Alguns\" and the strong simplification, shorter than the natural, had a clause removed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 342, |
|
"end": 355, |
|
"text": "(Dubay, 2007)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 445, |
|
"text": "(IPM, 2016)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 1565, |
|
"end": 1586, |
|
"text": "(Caseli et al., 2009)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1664, |
|
"end": 1686, |
|
"text": "(Scarton et al., 2010)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1689, |
|
"end": 1696, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(O) Uma parcela critica o uniforme, porque acredita que ele amea\u00e7aria a individualidade de cada um. (One parcel criticizes the uniform, because it believes that it would threaten the individuality of each one.) (N) Alguns criticam o uniforme, porque acreditam que ele amea\u00e7a a individualidade de cada um. (Some criticize the uniform because they believe that it threatens the individuality of each one.) (S) Alguns acreditam que o uniforme amea\u00e7a a individualidade de cada um. (Some believe that the uniform threatens the individuality of each one.) Table 1 : Examples of simplification in PorSimples.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 550, |
|
"end": 557, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, we know that even complex texts have simple sentences, what makes it difficult to identify precisely where complexity lies. In an automatic simplification task, as well, it is difficult to decide which sentence is complex and requires simplification. To address these difficulties, a new task has received attention recently: the prediction of sentences readability, also known by sentence-based readability or sentential complexity task. The first studies on this subject emerged in the beginning of the last decade (Dell'Orletta et al., 2011; Sj\u00f6holm, 2012; Del'Orletta et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 526, |
|
"end": 553, |
|
"text": "(Dell'Orletta et al., 2011;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 568, |
|
"text": "Sj\u00f6holm, 2012;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 594, |
|
"text": "Del'Orletta et al., 2014)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This task may support simplification systems at least in three applications: (i) to evaluate whether the simplification of a sentence (manual or automatic) is truly simpler than the original sentence or not; (ii) to inform the level of complexity of an original sentence; (iii) to rank the results of several simplification methods, according to their level of complexity. Besides supporting text simplification applications, computer-aided language learning (CALL) systems can benefit from sentence-level readability methods to predict which sentences of a text the students will struggle to read. Furthermore, Open Educational Resources repositories Wiley et al. (2014) may also take profit of such methods in order to return not merely relevant educational resources, but documents appropriate to the reading level of the user.", |
|
"cite_spans": [ |
|
{ |
|
"start": 652, |
|
"end": 671, |
|
"text": "Wiley et al. (2014)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Due to its several applications, sentential complexity has been a focus of interest in the NLP studies in recent years, such as Vajjala and Meurers (2014) , Vajjala and Meurers (2016) , Ambati et al. (2016) , Singh et al. (2016) , Howcroft and Demberg (2017), Gonzalez-Gardu\u00f1o and S\u00f8gaard (2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 154, |
|
"text": "Vajjala and Meurers (2014)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 183, |
|
"text": "Vajjala and Meurers (2016)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 206, |
|
"text": "Ambati et al. (2016)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 228, |
|
"text": "Singh et al. (2016)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 295, |
|
"text": "Gonzalez-Gardu\u00f1o and S\u00f8gaard (2017)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The lack of a sentence-based corpus annotated with regards to readability is a major obstacle to research in this area for Portuguese. Even the English language suffers some drawbacks in what concerns the evaluation of sentential complexity. One of them is the use of benchmarks built from adapted corpora which are automatically aligned, such as Wikipedia and Simple Wikipedia (Zhu et al., 2010) . This corpus has some problems to be used as benchmark for text simplification which also prevents its use for the sentential complexity task, for example, automatic sentence alignment errors, inadequate simplifications generating sentences which are not simple, and poor generalization for other genre than encyclopedia (Xu et al., 2015) . Other benchmarks for sentential complexity, such as OneStopEnglish corpus (Vajjala and Meurers, 2016) , have several positive points -the use of news articles which generalize better for other genres, not having sentence length as high predictive feature, as well as being available by requisition -but also can suffer from errors generated by automatic alignment. Newsela parallel corpus (cf. (Xu et al., 2015) ), composed of news articles rewritten by professional editors to be read for children at multiple grade levels, is very beneficial for studying text simplification and could serve as benchmark for sentential complexity if the resulting sentence corpus could be publicly available. Moreover, Scarton et al. (2018) made available the SimPA, an English sentence level corpus for the Public Administration domain with 1,100 original sentences simplified in the lexical (3,300 pairs) and syntactic levels (another 1,100 pairs), annotated by 176 volunteers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 378, |
|
"end": 396, |
|
"text": "(Zhu et al., 2010)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 719, |
|
"end": 736, |
|
"text": "(Xu et al., 2015)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 813, |
|
"end": 840, |
|
"text": "(Vajjala and Meurers, 2016)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 1133, |
|
"end": 1150, |
|
"text": "(Xu et al., 2015)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 1443, |
|
"end": 1464, |
|
"text": "Scarton et al. (2018)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we aim at obtaining nontrivial sentence pairs in Portuguese in order to create a gold standard corpus, publicly available. By nontrivial we mean that the pairs are not significantly different in length to avoid the easy judgment that the shorter sentences are the simpler ones. Although it is natural to expect that the simplified sentences are smaller, we found that it is not always true. An example of this is when, in order to simplify a content, one inserts an explanation, examples, or a list of synonyms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We evaluated three scenarios for building our gold standard corpus from PorSimples corpus, with special care for the split operation, because splitting can generate several short sentences from an original one. The first scenario is a corpus formed of pairs of original and simplified sentences in which, if the split operation is used, we repeat the original sentence to form pairs with each of the simplified sentences. In the second scenario we include pairs with all but the simplified sentences from the split operation. The last scenario is a corpus in which all simplification operations are allowed, but for splitting we only bring the longest simplified sentences to compose the pair original-simplified.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The remainder of this paper is organized as follows. Section 2 reviews the literature on sentence-based readability assessment and its evaluation corpora. Section 3 presents the parallel and aligned corpus of the PorSimples project and explains how we built three evaluation scenarios to create the PorSimplesSent, our corpus for sentence-based readability assessment in Portuguese. In Section 4 we discuss our baselines, our method and features extracted to evaluate the three evaluation scenarios. Conclusions and future work are presented in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Initially, sentence-based readability task was considered in isolation by several authors, each one studying a set of features and evaluating in specific corpora. Dell'Orletta et al. 2011were the first to consider the task of complexity for the sentential level, comparing its difficulty in relation to the textual level, for Italian. They used the SVM method of the LIBSVM library to train a model with 7,000 sentences, half selected in the newspaper La Repubblica and half of the newspaper Due Parole, the latter considered simple reading. Interestingly, features at the syntactic level had little influence on the classification of documents, but were very important for the sentential level. Training with 6,000 and testing against 1,000 sentences, they reached 78.2% accuracy at the sentential level. Sj\u00f6holm (2012) addressed the task for the Swedish, also using two sets of sentences. For evaluation, 3,500 sentences were taken from the Swedish corpus L\u00e4SBarT, considered simple, and 3,500 from the GP2006 (G\u00f6teborgsposten journal), considered complex, divided into seven parts, each part used for testing with the model trained in the other six. The best method was Sequential Minimal Optimization (SMO), which reached 83% accuracy. It is important to mention that using the same set of features to evaluate documents (simple and complex) instead of sentences, in the same corpus, they obtained 97% accuracy. Dell'Orletta et al. (2014) returned to the task, addressing the issue of textual genres. They used the same sets of features from the previous article (Dell 'Orletta et al., 2011) , but now adding three new corpora of different genres to the original journalistic genre: literary, didactic and scientific.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1573, |
|
"end": 1595, |
|
"text": "'Orletta et al., 2011)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence-based Readability Assessment and its Evaluation Corpora", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Vajjala and Meurers (2014) made the first evaluation using Wikipedia-Simple Wikipedia corpus, automatically aligned by Zhu et al. (2010) . This corpus became the most-used resource for sentential complexity evaluation in the English language. It was created with the matching of the sentences of 65,133 articles of Simple Wikipedia and Wikipedia, using the measure TF-IDF with cosine similarity. For the choice of the alignment measure, they evaluated the performance of three similarity measures: TF-IDF, word overlap and Minimum Edit Distance (MED), against 120 pairs of manually annotated sentences. The accuracy of TF-IDF was above 90%. As a final result, they created 108,016 aligned sentences, annotated in two classes: complex or simple, and a complex sentence may be mapped to one or more simple sentences to handle sentence splitting. This corpus was updated by Hwang et al. (2015) , reaching 150,000 pairs of aligned sentences. Table 2 shows the state-of-the-art (SotA) results we were able to compile, which use Wikipedia-Simple Wikipedia corpus. In the table, the name of each study is listed with the method/baseline used and the accuracy results. Vajjala and Meurers (2014) trained a SMO regression model for document complexity, which reached about 90% accuracy. They then applied the model at the sentence level, and even testing in datasets of several sizes, they only achieved 66% accuracy, creating a new baseline for the task. They concluded the reason for this low accuracy lies in the incorrect assumption that all Wikipedia sentences are more complex than Simple Wikipedia. Even so, this dataset has been used by several studies of sentence readability. As far as we could see, Gonzalez-Gardu\u00f1o and S\u00f8gaard (2017) presents the state-of-the-art for the task, using eye-tracking features together with linguistic and psycholinguistic ones. 
Vajjala and Meurers (2016) returned to the task, proposing a new method for evaluating paired sentences based on ranking. They contributed with a new corpus of English sentences aligned in three levels, called OneStopEnglish (OSE), used for training and testing. The OSE corpus is a corpus of aligned sentences created from articles rewritten by teaching experts for English language learners at three reading levels (elementary, intermediate, advanced) . They used 76 triplets of articles published between 2012 and 2014, resulting in a total of 837 written sentences with three levels (OSE3). For the alignment, TF-IDF and cosine similarity were used, with values above of 0.7. In addition to OSE3, a second corpus (OSE2) was compiled, which resulted in 3,113 sentence pairs: elementary-intermediate, intermediate-advanced, and elementary-advanced. This corpus was divided in two parts: 65% of pairs for training and the rest for testing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 136, |
|
"text": "Zhu et al. (2010)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 871, |
|
"end": 890, |
|
"text": "Hwang et al. (2015)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1861, |
|
"end": 1887, |
|
"text": "Vajjala and Meurers (2016)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 2278, |
|
"end": 2314, |
|
"text": "(elementary, intermediate, advanced)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 938, |
|
"end": 945, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentence-based Readability Assessment and its Evaluation Corpora", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In addition to significantly improving the accuracy of the task (over 80%), they assessed the impact of linguistic (lexical, syntactic, morphosyntactic) and psycholinguistic features, confirming the importance of eight features in OSE2: AoA (Age of acquisition), CTTR (corrected Type-token ratio), number of subtrees, average length of clause, average word imagery rating, average word familiarity rating, average Colorado meaningfulness rating of words, average concreteness rating. It is important to note that sentence length was not predictive in OSE2 corpus, as in this dataset rewriting and paraphrasing were the most used simplification operations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence-based Readability Assessment and its Evaluation Corpora", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As may be seen in Section 3, for our corpus, traditional psycholinguistic features such as AoA, imagery, concreteness, familiarity, have not been used to rank the three types of sentence pairs of PorSimplesSent. We have, indeed, analyzed their contribution to distinguish the three sentence levels, using the resource created by Santos et al. (2017) . However, the results were not discriminative. We hypothesize two reasons for this. One of them is related to characteristics of the resource, which has been created automatically based on existing psycholinguistic norms and may contain some bias. The other reason is related to characteristics of the corpus. The corpus PorSimples contain a lot of explanation relating to difficult words (this is a simplification strategy to deal with lexical complexity). However, once explained, the difficult words are repeated along the text. In PorSimplesSent, when there is a split operation, the explanations remain isolated, benefiting only the sentence they appear, whereas the other sentences containing the repetitions of difficult words remain lexically complex. In fact, the psycholinguistic features did not perform well in our corpus and, therefore, they were not chosen as best features for our method. Table 3 shows SotA results we were able to compile, which use OSE2 corpus, automatically aligned by Vajjala and Meurers (2016) . In the table, the name of each study is listed with the method used and the accuracy results, separated by OSE2 subcorpus. OSE(A-E) stands for pairs at the levels Advanced and Elementary; OSE(A-I) for pairs at Advanced and Intermediate levels; OSE(I-E) for Intermediate and Elementary, and OSE(All) for all three pairs. Howcroft and Demberg (2017) joined the subcorpus OSE(A-I) and OSE(I-E), calling it OSE near .", |
|
"cite_spans": [ |
|
{ |
|
"start": 329, |
|
"end": 349, |
|
"text": "Santos et al. (2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1355, |
|
"end": 1381, |
|
"text": "Vajjala and Meurers (2016)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1255, |
|
"end": 1262, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentence-based Readability Assessment and its Evaluation Corpora", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Method Vajjala and Meurers (2016) explored whether the types of simplification operations are different between Advanced sentences simplified to Intermediate, and Intermediate sentences simplified to Elementary, using OSE3 corpus. That is why we don't have explicit evaluation between these pairs nor between Advanced and Elementary sentence pairs in Table 3. 3 PorSimplesSent Corpus", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 351, |
|
"end": 359, |
|
"text": "Table 3.", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Study", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "OSE(A-E) OSE(A-I) OSE(I-E) OSE(All)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Study", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In order to create the PorSimplesSent, our corpus for sentence-based readability assessment in Portuguese, and to train and evaluate methods to predict sentential complexity for this language, we took advantage of PorSimples corpus (Caseli et al., 2009; .", |
|
"cite_spans": [ |
|
{ |
|
"start": 232, |
|
"end": 253, |
|
"text": "(Caseli et al., 2009;", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PorSimples Corpus", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "PorSimples corpus consists of 2,915 original sentences simplified into two levels of complexity:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PorSimples Corpus", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Natural and Strong. All the sentences are from informational texts, being 30% of scientific issues from newspaper Folha de S\u00e3o Paulo 2 and 70% of other issues from newspaper Zero Hora 3 . PorSimples corpus contains complete annotation of each operation made during the simplification process. This was facilitated by the Simplification Annotation Editor, developed in PorSimples project (Caseli et al., 2009) . The editor allows the human simplifier to register decisions of lexical and syntactic simplifications, which include substituting words, merging and splitting sentences, deleting part of the sentence, rewriting sentences with other words, and changing constituents order. The editor has a list of operations that may be chosen by the human simplifier. Simplification process in PorSimples was instructed by simplification guidelines, advising how to turn sentences simpler (Specia et al., 2008) . Examples show how to tackle with complex structures, like apposition, subordinate clauses, clauses initiated by non-finite verbs, passive voice, inversion of constituents order and embedded clauses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 387, |
|
"end": 408, |
|
"text": "(Caseli et al., 2009)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 884, |
|
"end": 905, |
|
"text": "(Specia et al., 2008)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PorSimples Corpus", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In a totally annotated process, the alignment between the simplified sentences and their respective simplifications is systematically ensured. This ensured alignment, added to the fact that the corpus contains a large variety of simplification strategies, makes PorSimples a unique corpus, entirely appropriate to evaluate readability predictors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PorSimples Corpus", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We created 4,968 pairs and 1,141 triplets of sentences, combining the three levels of PorSimples corpus: Original, Natural and Strong. Pairs and triplets have two or three different sentences aligned, being the Original the more complex in Original-Natural and Original-Strong pairs, and Natural the more complex in Natural-Strong pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In theory, there should be 8,745 pairs (an original-natural, an original-strong and a naturalstrong pairing for each of the 2,915 sentences) and 2,915 triplets (original-natural-strong). However, it occurred 3,777 pairs and 1,774 triplets containing at least two identical sentences, because some of the sentences were simplified only in one level or were not simplified at all (they were considered originally simple). Such pairs and triplets were removed from the corpus, which remained with 4,968 pairs and 1,141 triplets. Table 4 shows what happened with the original sentences of the texts during the simplification process that gave origin to PorSimples corpus. Part of the sentences has not been simplified, possibly because the sentences were considered already simple. The other part is composed of the simplified sentences, which followed one of three possible paths: simplification in both levels (Natural and Strong) or in only one of them (Natural or Strong). Additionally, in the PorSimples corpus, 3,873 sentences were simplified into two or more sentences, generating 5,938 sentences, distributed as shown in Table 5 . The split leads to an increase of 53% in the overall quantity of simplified sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 526, |
|
"end": 533, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 1125, |
|
"end": 1132, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Each of the resulting sentences is obviously simpler than the split sentence, however, differently from the other pairs, the sentences deriving from split are part and not an integral simplified version of the respective simplified sentence. To evaluate the effect of splitting on the accuracy of Original/ Natural 2,372 1,543 829 1,992 3,535 49% Natural/ Strong 1,501 782 719 1,621 2,403 60% TOTAL 3,873 2,325 1,548 3,613 5,938 53% Table 5 : Distribution of sentences increase due to split.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 297, |
|
"end": 433, |
|
"text": "Original/ Natural 2,372 1,543 829 1,992 3,535 49% Natural/ Strong 1,501 782 719 1,621 2,403 60% TOTAL 3,873 2,325 1,548", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 450, |
|
"end": 457, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Application of Simplification", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "the complexity assessment task, we created three versions of PorSimplesSent. The three versions are very similar, as they pair all the sentences with their respective simplified sentences. They differ in what concerns split sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Application of Simplification", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As we can see in Table 6 , the first version, PorSimplesSent1, has 10,616 pairs, including a pair for each sentence resulting from split. The second version, PorSimplesSent2, has 4,968 pairs and, for split sentences, selects only the simplification with greatest score after applying a linear combination of total number of words and word overlapping count, as exemplified in the following. The third version, PorSimplesSent3, disregard all the split sentences and has 2,600 pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 24, |
|
"text": "Table 6", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Application of Simplification", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "PorSimplesSent1 For example, given an Original sentence (O) simplified into two sentences in Natural level (N1 and N2):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Pairs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 (O): O dormit\u00f3rio, de aproximadamente cinco metros por cinco metros, completa-se com um guarda-roupas de duas portas, uma mesa, um frigobar e um aparelho de ar-condicionado. (The dormitory, approximately five meters by five meters, is complete with a two-door wardrobe, a table, a minibar and an air-conditioner.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Pairs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 (N1): O dormit\u00f3rio tem mais ou menos cinco metros por cinco metros. (length: 11 words; overlapping: 7 words; score: 11+7=18) (The dormitory is about five meters by five meters.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Pairs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 (N2): O dormit\u00f3rio se completa com um guarda-roupas de duas portas, uma mesa, um frigobar e um aparelho de ar-condicionado. (length: 19 words; overlapping: 19 words; score: 19+19=38) (The dormitory is complete with a two-door wardrobe, a table, a minibar and an air-conditioning unit.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Pairs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For PorSimplesSent1, we generated 2 pairs: O-N1 and O-N2. For PorSimplesSent2, we generated 1 pair: O-N2. The original was paired with the sentence N2, which presented a score of 38, against a score of 18 of the sentence N1. For PorSimplesSent3 we did not generate any pair with these sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Pairs", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To validate the corpus and to contribute with an initial baseline for the task in Portuguese, we evaluated a simple, but successful approach, inspired by Vajjala and Meurers (2016) - the pair-wise ranking. For sentential complexity, each sentence should receive a score from an ordinal list of complexity, which could be 1 to n, being n the most difficult. Once the ranking method receives a pair of sentences (with feature vectors) it will predict which one is simpler than the other. The problem of sentential complexity is reduced to the comparison of sentences pairs taken from a pool of sentences where the objective is to rank them according to their complexity, trying to minimize inversion of ranks. As these authors, we also chose the RankSVM algorithm implemented in SVM Rank (Joachims, 2006) 4 , which presented the best results among the algorithms tested for the task in English. We gave the rank value 2 to the complex side and value 1 to the simplified side of each sentence pair.", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 182, |
|
"text": "Vajjala and Meurers (2016) -", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 786, |
|
"end": 802, |
|
"text": "(Joachims, 2006)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 803, |
|
"end": 804, |
|
"text": "4", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus Validation 4.1 Method", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For this experiment, we evaluated previously the sets of Original, Natural and Strong simplified sentences of PorSimples Corpus, using two publicly available NLP tools for Portuguese to extract textual metrics, which can be used to aid the automated analysis of text readability: Coh-Metrix-Port 2.0 5 (Scarton et al., 2010; and Coh-Metrix-Dementia 6 (Alu\u00edsio et al., 2016) , both based on Coh-Metrix (Graesser et al., 2004) . Also, we were inspired by another tool named AIC 7 , built in PorSimples project which defined several syntactic metrics to be used in evaluation of text readability. Then we chose the 17 features that presented a clear tendency (increase or decrease, depending on the feature) in the three levels compared (see Table 7 and 8) in order to train a predictor. Table 7 shows mean values of syntactic metrics for Original (O), Natural (N) and Strong (S) sentence levels in PorSimples corpus. In the Table 9 : Baselines and first experiment results (accuracy), using SVMRank.", |
|
"cite_spans": [ |
|
{ |
|
"start": 302, |
|
"end": 324, |
|
"text": "(Scarton et al., 2010;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 373, |
|
"text": "(Alu\u00edsio et al., 2016)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 424, |
|
"text": "(Graesser et al., 2004)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 739, |
|
"end": 746, |
|
"text": "Table 7", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 785, |
|
"end": 792, |
|
"text": "Table 7", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 922, |
|
"end": 929, |
|
"text": "Table 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In PorSimplesSent1, as expected, using just the number of tokens per sentence it is possible to achieve more than 80% of accuracy. This is because this dataset includes all sentences that are result of split operations, so the majority of simplified sentences are small parts from the original ones. The PorSimplesSent3, which has only full sentences, disregarding those that suffered split, is the most difficult to rank. Besides having the smallest number of pairs, PorSimplesSent3 has some simplified sentences that are bigger than the original ones. The PorSimplesSent2, on its turn, is a middle term between the previous two: it has split sentences, but only the longest sentence derived from the split is paired with the original sentence. Therefore, we have chosen the dataset PorSimplesSent2 to be our gold standard for sentential complexity task in Portuguese.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Our model with 17 features presents improvement over the strongest baseline (Tokens per Sentence): 2.65 in PorSimplesSent1, achieving 83.39% accuracy; 4.85 in PorSimplesSent2, achieving 74.20% accuracy; and 12.91 in PorSimplesSent3, achieving 53.67% accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We performed a manual analysis, trying to understand the errors made by our model, in order to improve it with new features. Building on the syntactic and lexical operations used to annotated the PorSimples corpus, but now with focus on operations at the sentence level, we proposed a set of 14 labels to annotate the errors. Table 10 shows the errors found after this analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 334, |
|
"text": "Table 10", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Qty % We annotated 209 of the 418 sentence pairs of PorSimplesSent3 for which our model missed the prediction. The annotation performed by two annotators was double blind and multi-label. A discussion on the pairs presenting annotation disagreement helped to clarify doubts on the annotation process and to assign commonly agreed labels. After that, the remaining sentence pairs were divided into two parts and each part was assigned to only one annotator.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The analysis of these numbers lead us to cogitate which features and metrics might be significant to improve the performance of our ranking model, initially trained with 17 linguistic and psycholinguistic features. Both most frequent labels, 1 and 3, relate to lexical substitution. Example 1 below shows a pair of sentences annotated only with the label 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Example 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 (O): Quem \u00e9 contra diz que os c\u00e3es sujam a praia e colocam em risco a sa\u00fade dos veranistas. (Those who are against say that the dogs dirty the beach and put at risk the health of the vacationers.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 (N): Quem \u00e9 contra diz que os c\u00e3es sujam a praia e colocam em risco a sa\u00fade das pessoas. (Those who are against say that the dogs dirty the beach and put at risk the health of the people.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The only difference between the two sentences is the pair of words \"veranistas\" versus \"pessoas\", in a hyponym relationship. Example 2 brings a pair annotated with label 3. It shows 2 substitutions by paraphases, here understood as a word replaced by several ones, similar in meaning: \"possibilitar\" by \"tornar\u00e1 poss\u00edvel\" and \"hep\u00e1tica\" by \"do f\u00edgado\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Example 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 (O): A descoberta possibilitar\u00e1 que pessoas com dano no f\u00edgado usem as pr\u00f3prias c\u00e9lulastronco para produzir c\u00e9lulas hep\u00e1ticas. (The discovery will enable people with liver damage to use their own stem cells to produce hepatic cells.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 (N): A descoberta tornar\u00e1 poss\u00edvel que pessoas com dano no f\u00edgado usem as pr\u00f3prias c\u00e9lulastronco para produzir c\u00e9lulas do f\u00edgado. (The discovery will make it possible for people with liver damage to use their own stem cells to produce liver cells.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As many sentence pairs differ by only one word, readability measures to compare words are essential to decide which is the easiest sentence. Word frequency and psycholinguistic properties of words (as age of acquisition, familiarity, concreteness, imageability) may be useful for this purpose. Additionally, there are several resources that may be used to design new metrics to deal with similar words and paraphases. For Portuguese, there are different similar projects of wordnets, among which stand out the OpenWordNet-PT (de Paiva et al., 2012) , as the most complete with manual revision, and the CONTO.PT (Gon\u00e7alo Oliveira, 2016), built semiautomatically in order to comprise a greater number of words, and which describes itself as a diffuse wordnet. There is also the PPDB (Paraphrase Database), a resource that contains paraphrases in several languages, including Portuguese, automatically extracted from bilingual corpora (Ganitkevitch and Callison-Burch, 2014) . Paraphrase in the context of PPDB refers to expressions or equivalent words. As it was generated automatically, the PPDB also contains some false positives. The resource is available in six different sizes: the difference is that larger sets extracted paraphrase rules with less confidence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 525, |
|
"end": 548, |
|
"text": "(de Paiva et al., 2012)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 932, |
|
"end": 971, |
|
"text": "(Ganitkevitch and Callison-Burch, 2014)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For features other than the lexical ones, a very promising research avenue is to test simplified sentences with human readers to confirm whether they are simpler than their original counterparts or not (using eye-trackers). This is relevant because many simplification operations we use are inspired in the literature regarding English language simplification and we need more evidence related to Portuguese language. The error analysis, therefore, provided important insights for future work aiming to increase the accuracy of our model in the dataset made available with this paper. Besides that, 80 pairs were dropped from our dataset because they contain nearly identical sentences or completely different sentences (improperly paired due to alignment error). Therefore, all the three totals in Table 6 were reduced by 80, resulting in 10,536, 4,888, and 2,520 sentences, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 799, |
|
"end": 806, |
|
"text": "Table 6", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Label Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we presented a new resource to evaluate the task of sentence readability for Portuguese language -the corpus PorSimplesSent. This dataset is larger, in terms of sentence pairs, than a similar corpus for the English language (cg. (Vajjala and Meurers, 2016) ), and it is the first resource of this kind for Portuguese language, therefore we believe we can have a blossom of future research for this task. Moreover, we made available four baselines for the corpus and an approach based on pairwise ranking to compare two versions of a sentence. Our model uses 17 lexical, syntactic and psycholinguistic features and identifies the readability level of sentence pairs with an accuracy of 74.2%; an improvement of 2.65 on the strongest baseline. We believe there is plenty of room for improvement of our model and we hope this task receive a lot of attention from researchers devoted to Portuguese language NLP as well. The corpus is made publicly available at http://www.nilc.icmc.usp.br/nilc/index.php/ tools-and-resources. As for future work, we will enlarge the number of features to build an improved model to evaluate the task and organize a shared task using it in an NLP conference.", |
|
"cite_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 271, |
|
"text": "(Vajjala and Meurers, 2016)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "http://www.ibge.gov.br/home/estatistica/economia/agropecuaria/censoagro/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.folha.uol.com.br 3 https://gauchazh.clicrbs.com.br", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.cs.cornell.edu/people/tj/svm_light/svm_rank.html 5 http://143.107.183.175:22680 6 http://143.107.183.175:22380 7 http://conteudo.icmc.usp.br/pessoas/taspardo/NILCTR0808.pdf", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Syllables per content word: Average number of syllables per content word", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Syllables per content word: Average number of syllables per content word;", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Words per sentence: Number of words in the sentence", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "2. Words per sentence: Number of words in the sentence;", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Classic formula, its a type token ratio form less sensitive to text size", |
|
"authors": [ |
|
{ |
|
"first": ";", |
|
"middle": [], |
|
"last": "Brunet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brunet: Classic formula, its a type token ratio form less sensitive to text size (Thomas et al., 2005);", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Honor\u00e9: Classic formula similar to Brunet but vocabulary-based", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Honor\u00e9: Classic formula similar to Brunet but vocabulary-based (Thomas et al., 2005);", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Mean verb phrase per sentence: Measures the quantity of verb phrases per sentence (implemented via tagger, counts verbs in a sentence)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mean verb phrase per sentence: Measures the quantity of verb phrases per sentence (im- plemented via tagger, counts verbs in a sentence);", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Yngve: Measures how much a syntactic tree escapes from the pattern that tend to have branches to the right (Yngve", |
|
"authors": [], |
|
"year": 1960, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yngve: Measures how much a syntactic tree escapes from the pattern that tend to have branches to the right (Yngve, 1960);", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A bottom-up approach to calculate syntactic complexity of a sentence", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Frazier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1985, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frazier: A bottom-up approach to calculate syntactic complexity of a sentence (Frazier, 1985);", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Dependency distance: Calculates dependency distances in the syntactic tree", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "8. Dependency distance: Calculates dependency distances in the syntactic tree; as dependency distances grows, the text complexity grows together;", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Apposition per clause: Number of appositions in the sentence divided per number of clauses", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Apposition per clause: Number of appositions in the sentence divided per number of clauses;", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Clauses per sentence: Number of clauses in a sentence", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "10. Clauses per sentence: Number of clauses in a sentence (implemented via parser Palavras (Bick, 2000); counts main verbs, excluding auxiliary verbs);", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Max noun phrase: Maximum length of noun phrase in a sentence", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Max noun phrase: Maximum length of noun phrase in a sentence, calculated in words;", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Mean noun phrase: Mean of noun phrase length in a sentence", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "12. Mean noun phrase: Mean of noun phrase length in a sentence, calculated in words;", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Postponed subject ratio: Occurrence of Verb-Subject order instead of canonical Subject-Verb order", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "13. Postponed subject ratio: Occurrence of Verb-Subject order instead of canonical Subject- Verb order, calculated in relation to the total number of clauses;", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Subordinate clauses: Proportion of subordinate clauses to the total number of clauses", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "14. Subordinate clauses: Proportion of subordinate clauses to the total number of clauses;", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Infinite subordinate clauses: Proportion of subordinate clauses made by verbs in infinitive", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "15. Infinite subordinate clauses: Proportion of subordinate clauses made by verbs in infinitive, gerund and past participle form;", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Non-inflected verbs: Number of verbs that have not been inflected, that is, which are in infinite form: infinitive", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Non-inflected verbs: Number of verbs that have not been inflected, that is, which are in infinite form: infinitive, gerund and past participle;", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Words before main verb: Number of words before the main verbal phrase", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "17. Words before main verb: Number of words before the main verbal phrase.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "As baselines for our tests, we chose four unique features and evaluated them individually on SVM Rank : a) Words before main verb, b) Clauses per sentence, c) Syllables per content word and d) Tokens per sentence", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Evaluation The 10-fold cross validation accuracy results are displayed in Table 9. As baselines for our tests, we chose four unique features and evaluated them individually on SVM Rank : a) Words before main verb, b) Clauses per sentence, c) Syllables per content word and d) Tokens per sentence.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Evaluating progression of alzheimer's disease by regression and classification methods in a narrative language test in portuguese", |
|
"authors": [ |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andre", |
|
"middle": [], |
|
"last": "Cunha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "12th International Conference on Computational Processing of the Portuguese Language", |
|
"volume": "9727", |
|
"issue": "", |
|
"pages": "109--114", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sandra M. Alu\u00edsio, Andre Cunha, and Carolina Scarton. 2016. Evaluating progression of alzheimer's disease by regression and classification methods in a narrative language test in portuguese. In 12th International Conference on Computational Processing of the Portuguese Language (PROPOR 2016), volume 9727 of Lecture Notes in Computer Science, pages 109-114. Springer Cham.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Fostering digital inclusion and accessibility: the Porsimples project for simplication of portuguese texts", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Sandra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caroline", |
|
"middle": [], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gasperin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the NAACL HLT 2010 Young Investigators Workshop on Computational Approaches to Languages of the Americas -Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "46--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sandra M. Alu\u00edsio and Caroline Gasperin. 2010. Fostering digital inclusion and accessibility: the Por- simples project for simplication of portuguese texts. In Proceedings of the NAACL HLT 2010 Young Investigators Workshop on Computational Approaches to Languages of the Americas -Association for Computational Linguistics, pages 46-53, Stroudsburg, PA.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Assessing relative sentence complexity using an incremental CCG parser", |
|
"authors": [ |
|
{ |
|
"first": "Bharat", |
|
"middle": [ |
|
"Ram" |
|
], |
|
"last": "Ambati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siva", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "The Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1051--1057", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharat Ram Ambati, Siva Reddy, and Mark Steedman. 2016. Assessing relative sentence complexity using an incremental CCG parser. In HLT-NAACL, pages 1051-1057, Stroudsburg, PA. The Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The parsing system Palavras: Automatic grammatical analysis of Portuguese in a Constraint Grammar Framework", |
|
"authors": [ |
|
{ |
|
"first": "Eckhard", |
|
"middle": [], |
|
"last": "Bick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eckhard Bick. 2000. The parsing system Palavras: Automatic grammatical analysis of Portuguese in a Constraint Grammar Framework. Aarhus University Press.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Building a Brazilian Portuguese parallel corpus of original and simplified texts", |
|
"authors": [ |
|
{ |
|
"first": "Helena", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Caseli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tiago", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Pereira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00facia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Thiago", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caroline", |
|
"middle": [], |
|
"last": "Pardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Gasperin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Advances in Computational Linguistics", |
|
"volume": "41", |
|
"issue": "", |
|
"pages": "59--70", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helena M. Caseli, Tiago F. Pereira, L\u00facia Specia, Thiago A. S. Pardo, Caroline Gasperin, and Sandra M. Alu\u00edsio. 2009. Building a Brazilian Portuguese parallel corpus of original and simplified texts. In Advances in Computational Linguistics, Research in Computer Science (CICLing-2009), volume 41, pages 59-70.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "OpenWordNet-PT: An open Brazilian Wordnet for reasoning", |
|
"authors": [ |
|
{
    "first": "Valeria",
    "middle": [],
    "last": "De Paiva",
    "suffix": ""
},
{
    "first": "Alexandre",
    "middle": [],
    "last": "Rademaker",
    "suffix": ""
},
{
    "first": "Gerard",
    "middle": [],
    "last": "De Melo",
    "suffix": ""
}
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of COLING 2012: Demonstration Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "353--360", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Valeria de Paiva, Alexandre Rademaker, and Gerard de Melo. 2012. OpenWordNet-PT: An open Brazi- lian Wordnet for reasoning. In Proceedings of COLING 2012: Demonstration Papers, pages 353-360, Mumbai, India, December. The COLING 2012 Organizing Committee. Published also as Techreport http://hdl.handle.net/10438/10274.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Read-it: Assessing readability of Italian texts with a view to text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Felice", |
|
"middle": [], |
|
"last": "Dell'Orletta",
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simonetta", |
|
"middle": [], |
|
"last": "Montemagni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giulia", |
|
"middle": [], |
|
"last": "Venturi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "SLPAT '11 Proceedings of the Second Workshop on Speech and Language Processing for Assistive Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "73--83", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felice Dell'Orletta, Simonetta Montemagni, and Giulia Venturi. 2011. Read-it: Assessing readability of Italian texts with a view to text simplification. In SLPAT '11 Proceedings of the Second Workshop on Speech and Language Processing for Assistive Technologies, pages 73-83, Stroudsburg, PA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Assessing document and sentence readability in less resourced languages and across textual genres", |
|
"authors": [ |
|
{ |
|
"first": "Felice", |
|
"middle": [], |
|
"last": "Del'Orletta",
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simonetta", |
|
"middle": [], |
|
"last": "Montemagni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giulia", |
|
"middle": [], |
|
"last": "Venturi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Journal of Applied Linguistics (ITL). Special Issue on Readability and Text Simplification", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felice Del'Orletta, Simonetta Montemagni, and Giulia Venturi. 2014. Assessing document and sentence readability in less resourced languages and across textual genres. International Journal of Applied Linguistics (ITL). Special Issue on Readability and Text Simplification.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A lightweight regression method to infer psycholinguistic properties for Brazilian Portuguese", |
|
"authors": [ |
|
{ |
|
"first": "Leandro", |
|
"middle": [], |
|
"last": "Borges Dos Santos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Magali", |
|
"middle": [ |
|
"Sanches" |
|
], |
|
"last": "Duran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [
    "Siegle"
],
"last": "Hartmann",
|
"suffix": "" |
|
}, |
|
{
    "first": "Gustavo",
    "middle": [
        "Henrique"
    ],
    "last": "Paetzold",
    "suffix": ""
},
{
    "first": "Arnaldo",
    "middle": [],
    "last": "Candido",
    "suffix": ""
},
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"Maria" |
|
], |
|
"last": "Aluisio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Text, Speech, and Dialogue", |
|
"volume": "10415", |
|
"issue": "", |
|
"pages": "281--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leandro Borges dos Santos, Magali Sanches Duran, Nathan Siegle Hartmann, Gustavo Henrique Paet- zold Arnaldo Candido, and Sandra Maria Aluisio. 2017. A lightweight regression method to infer psycholinguistic properties for Brazilian Portuguese. In International Conference on Text, Speech, and Dialogue (TSD 2017), volume 10415 of Lecture Notes in Computer Science, pages 281-289. Springer, Cham.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Smart Language: Readers, Readability, and the Grading of Text. Impact Information", |
|
"authors": [ |
|
{
    "first": "William",
    "middle": [
        "H"
    ],
    "last": "Dubay",
    "suffix": ""
}
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William H. Dubay. 2007. Smart Language: Readers, Readability, and the Grading of Text. Impact Information, Costa Mesa, CA.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Syntactic complexity", |
|
"authors": [ |
|
{ |
|
"first": "Lyn", |
|
"middle": [], |
|
"last": "Frazier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1985, |
|
"venue": "Natural Language Parsing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lyn Frazier. 1985. Syntactic complexity. D.R. Dowty, L. Karttunen and A.M. Zwicky (eds.), Natural Language Parsing, Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "The multilingual paraphrase database", |
|
"authors": [ |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": ";", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalid", |
|
"middle": [], |
|
"last": "Choukri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thierry", |
|
"middle": [], |
|
"last": "Declerck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hrafn", |
|
"middle": [], |
|
"last": "Loftsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bente", |
|
"middle": [], |
|
"last": "Maegaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juri Ganitkevitch and Chris Callison-Burch. 2014. The multilingual paraphrase database. In Nicoletta Calzolari (Conference Chair), Khalid Choukri, Thierry Declerck, Hrafn Loftsson, Bente Maegaard, Joseph Mariani, Asuncion Moreno, Jan Odijk, and Stelios Piperidis, editors, Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14), Reykjavik, Iceland, may. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Using gaze to predict text readability", |
|
"authors": [ |
|
{ |
|
"first": "Ana",
"middle": [
    "Valeria"
],
"last": "Gonzalez-Gardu\u00f1o",
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 12th Workshop on Innovative Use of NLP for Building Educational Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "438--443", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ana Valeria Gonzalez-Gardu\u00f1o and Anders S\u00f8gaard. 2017. Using gaze to predict text readability. In Proceedings of the 12th Workshop on Innovative Use of NLP for Building Educational Applications, pages 438-443, Stroudsburg, PA. The Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Conto.pt: Groundwork for the automatic creation of a fuzzy portuguese wordnet", |
|
"authors": [ |
|
{ |
|
"first": "Hugo",
"middle": [],
"last": "Gon\u00e7alo Oliveira",
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "12th International Conference on Computational Processing of the Portuguese Language", |
|
"volume": "9727", |
|
"issue": "", |
|
"pages": "283--295", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hugo Gon\u00e7alo Oliveira. 2016. Conto.pt: Groundwork for the automatic creation of a fuzzy portuguese wordnet. In 12th International Conference on Computational Processing of the Portuguese Language (PROPOR 2016), volume 9727 of Lecture Notes in Computer Science, pages 283-295. Springer Cham.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Coh-metrix: Analysis of text on cohesion and language", |
|
"authors": [ |
|
{ |
|
"first": "Arthur", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Graesser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danielle", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "McNamara",
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Louwerse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiqiang", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Behavior Research Methods, Instruments, & Computers", |
|
"volume": "36", |
|
"issue": "", |
|
"pages": "193--202", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arthur C. Graesser, Danielle S. McNamara, Max M. Louwerse, and Zhiqiang Cai. 2004. Coh-metrix: Analysis of text on cohesion and language. Behavior Research Methods, Instruments, & Computers, 36:193-202.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Psycholinguistic models of sentence processing improve sentence readability ranking", |
|
"authors": [ |
|
{
    "first": "David",
    "middle": [
        "M"
    ],
    "last": "Howcroft",
    "suffix": ""
},
{
    "first": "Vera",
    "middle": [],
    "last": "Demberg",
    "suffix": ""
}
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "958--968", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David M. Howcroft and Vera Demberg. 2017. Psycholinguistic models of sentence processing improve sentence readability ranking. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics, pages 958-968, Stroudsburg, PA. The Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Aligning sentences from Standard Wikipedia to Simple Wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "211--217", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Hwang, Hannaneh Hajishirzi, Mari Ostendorf, and Wei Wu. 2015. Aligning sentences from Standard Wikipedia to Simple Wikipedia. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 211- 217, Stroudsburg, PA. The Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Inaf brasil 2015: Indicador de alfabetismo funcional -alfabetismo no mundo do trabalho", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "IPM",
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "IPM. 2016. Inaf brasil 2015: Indicador de alfabetismo funcional -alfabetismo no mundo do trabalho. Instituto Paulo Montenegro.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Training linear SVMs in linear time", |
|
"authors": [ |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 12th ACM SIGKDD international conference on Knowledge discovery and data mining", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "217--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thorsten Joachims. 2006. Training linear SVMs in linear time. In Proceedings of the 12th ACM SIGKDD international conference on Knowledge discovery and data mining, volume 3, pages 217-226. ACM Press.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Revisiting the readability assessment of texts in Portuguese", |
|
"authors": [ |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caroline", |
|
"middle": [], |
|
"last": "Gasperin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra",
"middle": [],
"last": "Alu\u00edsio",
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "12th Ibero-American Conference on AI, Advances in Artificial Intelligence -IBERAMIA 2010", |
|
"volume": "6433", |
|
"issue": "", |
|
"pages": "306--315", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carolina Scarton, Caroline Gasperin, and Sandra Alu\u00edsio. 2010. Revisiting the readability assessment of texts in Portuguese. In Simari G.R. Kuri-Morales A., editor, 12th Ibero-American Conference on AI, Advances in Artificial Intelligence -IBERAMIA 2010, volume 6433 of Lecture Notes in Computer Science, pages 306-315, Berlin, Heidelberg. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Simpa: A sentence-level simplification corpus for the public administration domain", |
|
"authors": [ |
|
{ |
|
"first": "Carolina", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gustavo", |
|
"middle": [], |
|
"last": "Paetzold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": ";", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalid", |
|
"middle": [], |
|
"last": "Choukri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Cieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thierry", |
|
"middle": [], |
|
"last": "Declerck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Goggi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koiti", |
|
"middle": [], |
|
"last": "Hasida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hitoshi", |
|
"middle": [], |
|
"last": "Isahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bente", |
|
"middle": [], |
|
"last": "Maegaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Mariani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carolina Scarton, Gustavo Paetzold, and Lucia Specia. 2018. Simpa: A sentence-level simplification corpus for the public administration domain. In Nicoletta Calzolari (Conference chair), Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Koiti Hasida, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, H\u00e9l\u00e8ne Mazo, Asuncion Moreno, Jan Odijk, Stelios Piperidis, and Takenobu Tokunaga, editors, Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Paris, France, may. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Quantifying sentence complexity based on eye-tracking measures", |
|
"authors": [ |
|
{ |
|
"first": "Abhinav Deep", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Poojan", |
|
"middle": [], |
|
"last": "Mehta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samar", |
|
"middle": [], |
|
"last": "Husain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajakrishnan", |
|
"middle": [], |
|
"last": "Rajkumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Workshop on Computational Linguistics for Linguistic Complexity", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "202--212", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhinav Deep Singh, Poojan Mehta, Samar Husain, and Rajakrishnan Rajkumar. 2016. Quantifying sentence complexity based on eye-tracking measures. In Proceedings of the Workshop on Computational Linguistics for Linguistic Complexity, pages 202-212, Osaka, Japan. The COLING 2016 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Probability as readability: A new machine learning approach to readability assessment for written Swedish", |
|
"authors": [ |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Sj\u00f6holm", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johan Sj\u00f6holm. 2012. Probability as readability: A new machine learning approach to readability assess- ment for written Swedish. LiU Electronic Press.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Manual de simplifica\u00e7\u00e3o sint\u00e1tica para o portugu\u00eas", |
|
"authors": [ |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
}, |
|
{
    "first": "Thiago",
    "middle": [
        "A",
        "S"
    ],
    "last": "Pardo",
    "suffix": ""
}
|
], |
|
"year": 2008, |
|
"venue": "ICMC-USP, jun. S\u00e9rie de Relat\u00f3rios do N\u00facleo Interinstitucional de Ling\u00fc\u00edstica Computacional", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucia Specia, Sandra M. Alu\u00edsio, and Thiago A. S. Pardo. 2008. Manual de simplifica\u00e7\u00e3o sint\u00e1tica para o portugu\u00eas. NILC Technical Report 08-06, ICMC-USP, jun. S\u00e9rie de Relat\u00f3rios do N\u00facleo Interinstitucional de Ling\u00fc\u00edstica Computacional (NILC-TR-08-06), 27 p.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Automatic detection and rating of dementia of alzheimer type through lexical analysis of spontaneous speech", |
|
"authors": [ |
|
{ |
|
"first": "Calvin", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vlado", |
|
"middle": [], |
|
"last": "Keselj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Cercone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Rockwood", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elissa", |
|
"middle": [], |
|
"last": "Asp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of IEEE ICMA 2005", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "1569--1574", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Calvin Thomas, Vlado Keselj, Nick Cercone, Kenneth Rockwood, and Elissa Asp. 2005. Automatic detection and rating of dementia of alzheimer type through lexical analysis of spontaneous speech. In Proceedings of IEEE ICMA 2005, volume 3, pages 1569-1574. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Assessing the relative reading level of sentence pairs for text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Sowmya", |
|
"middle": [], |
|
"last": "Vajjala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Detmar", |
|
"middle": [], |
|
"last": "Meurers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics (EACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "288--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sowmya Vajjala and Detmar Meurers. 2014. Assessing the relative reading level of sentence pairs for text simplification. Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics (EACL), pages 288-297.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Readability-based sentence ranking for evaluating text simplification", |
|
"authors": [ |
|
{ |
|
"first": "Sowmya", |
|
"middle": [], |
|
"last": "Vajjala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Detmar", |
|
"middle": [], |
|
"last": "Meurers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sowmya Vajjala and Detmar Meurers. 2016. Readability-based sentence ranking for evaluating text simplification. CoRR, abs/1603.06009.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Open educational resources: A review of the literature", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Wiley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Bliss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [], |
|
"last": "McEwen",
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Handbook of Research on Educational Communications and Technology: Fourth Edition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "781--789", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Wiley, T.J. Bliss, and Mary McEwen. 2014. Open educational resources: A review of the literature. In Spector J., Merrill M., Elen J., and Bishop M., editors, Handbook of Research on Educational Communications and Technology: Fourth Edition, pages 781-789, New York, NY. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Problems in current text simplification research: New data can help", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "283--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xu, Chris Callison-Burch, and Courtney Napoles. 2015. Problems in current text simplification research: New data can help. Transactions of the Association for Computational Linguistics, 3:283- 297.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "A model and hypothesis for language structure", |
|
"authors": [ |
|
{
    "first": "Victor",
    "middle": [
        "H"
    ],
    "last": "Yngve",
    "suffix": ""
}
|
], |
|
"year": 1960, |
|
"venue": "Proceedings of the American Philosophical Association", |
|
"volume": "104", |
|
"issue": "5", |
|
"pages": "444--466", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor H Yngve. 1960. A model and hypothesis for language structure. Proceedings of the American Philosophical Association, 104(5):444-466.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "A monolingual tree-based translation model for sentence simplification", |
|
"authors": [ |
|
{ |
|
"first": "Zhemin", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Delphine", |
|
"middle": [], |
|
"last": "Bernhard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of The 23rd International Conference on Computational Linguistics (COLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1353--1361", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhemin Zhu, Delphine Bernhard, and Iryna Gurevych. 2010. A monolingual tree-based translation model for sentence simplification. In Proceedings of The 23rd International Conference on Computational Linguistics (COLING), August 2010. Beijing, China, pages 1353-1361. The COLING 2010 Organizing Committee.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "SotA results using Wikipedia-SimpleWikipedia corpus.", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "SotA accuracy results using OSE2 corpus.", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Distribution of original sentences according to the level of simplification.", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Distribution of pairs by level in the three versions of PorSimplesSent.", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF8": { |
|
"content": "<table><tr><td>L</td><td>S</td><td>CpS ApC</td><td>DD</td><td colspan=\"3\">MaxNP MeanNP SC MVPpS NIV PSR</td><td>ISC</td></tr><tr><td colspan=\"4\">O 2372 2.62 0.07 48.24</td><td>9.87</td><td>5.84</td><td>0.38</td><td>2.24</td><td>0.31 0.085 0.179</td></tr><tr><td colspan=\"4\">N 3535 1.95 0.02 28.39</td><td>7.35</td><td>4.79</td><td>0.26</td><td>1.71</td><td>0.22 0.051 0.124</td></tr><tr><td colspan=\"4\">S 2402 1.74 0.01 22.16</td><td>6.48</td><td>4.39</td><td>0.24</td><td>1.55</td><td>0.21 0.052 0.117</td></tr></table>", |
|
"type_str": "table", |
|
"text": "table, S stands for Number of Sentences, CpS for Clauses per Sentence, ApC for Apposition per Clause, DD for Dependency Distance, MaxNP and MeanNP for Max and Mean Noun Phrase, SC for Subordinate Clauses, MVPpS for Mean Verb Phrase per Sentence, NIV for Non Inflected Verbs, PSR for Postponed Subject Ratio and ISC for Infinite Subordinate Clauses.Table 8shows mean values of lexical and psycholinguistic metrics for Original (O), Natural (N) and Strong (S) sentence levels in PorSimples corpus. In the table, WpS stands for Words per sentence, SpCW for Syllables per Content Words and WbMV for Words before Main Verbs.", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF9": { |
|
"content": "<table><tr><td colspan=\"7\">Distribution of corpus sentences according to the level (L) of simplification -Syntactic</td></tr><tr><td>Metrics.</td><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"7\">L WpS SpCW WbMV Yngve Frazier Honor\u00e9 Brunet</td></tr><tr><td>O 21.01</td><td>2.86</td><td>6.16</td><td>2.89</td><td>7.38</td><td>1214.16</td><td>40.29</td></tr><tr><td>N 14.77</td><td>2.74</td><td>4.09</td><td>2.43</td><td>6.64</td><td>727.87</td><td>51.44</td></tr><tr><td>S 12.79</td><td>2.76</td><td>3.73</td><td>2.32</td><td>6.48</td><td>563.98</td><td>52.14</td></tr></table>", |
|
"type_str": "table", |
|
"text": "", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF10": { |
|
"content": "<table><tr><td>The features are from three different groups: 1-4 are lexical; 5-16 measures syntactic comple-</td></tr><tr><td>xity, and the last one is a psycholinguistic measure of working memory overload:</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Distribution of corpus sentences according to the level (L) of simplification -Lexical, Psycholinguistic and the Classic Syntactic Metrics of Yngve and Frazier.", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF12": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "List of Errors used to annotate 418 sentence pairs of PorSimplesSent3.", |
|
"html": null, |
|
"num": null |
|
} |
|
} |
|
} |
|
} |