|
{ |
|
"paper_id": "W05-0206", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T04:46:02.936821Z" |
|
}, |
|
"title": "Automatic Essay Grading with Probabilistic Latent Semantic Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Tuomo", |
|
"middle": [], |
|
"last": "Kakkonen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Joensuu", |
|
"location": { |
|
"postBox": "P.O. Box 111", |
|
"postCode": "FI-80101", |
|
"settlement": "Joensuu", |
|
"country": "FINLAND" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Niko", |
|
"middle": [], |
|
"last": "Myller", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Joensuu", |
|
"location": { |
|
"postBox": "P.O. Box 111", |
|
"postCode": "FI-80101", |
|
"settlement": "Joensuu", |
|
"country": "FINLAND" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jari", |
|
"middle": [], |
|
"last": "Timonen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Joensuu", |
|
"location": { |
|
"postBox": "P.O. Box 111", |
|
"postCode": "FI-80101", |
|
"settlement": "Joensuu", |
|
"country": "FINLAND" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Erkki", |
|
"middle": [], |
|
"last": "Sutinen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Joensuu", |
|
"location": { |
|
"postBox": "P.O. Box 111", |
|
"postCode": "FI-80101", |
|
"settlement": "Joensuu", |
|
"country": "FINLAND" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Probabilistic Latent Semantic Analysis (PLSA) is an information retrieval technique proposed to improve the problems found in Latent Semantic Analysis (LSA). We have applied both LSA and PLSA in our system for grading essays written in Finnish, called Automatic Essay Assessor (AEA). We report the results comparing PLSA and LSA with three essay sets from various subjects. The methods were found to be almost equal in the accuracy measured by Spearman correlation between the grades given by the system and a human. Furthermore, we propose methods for improving the usage of PLSA in essay grading.", |
|
"pdf_parse": { |
|
"paper_id": "W05-0206", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Probabilistic Latent Semantic Analysis (PLSA) is an information retrieval technique proposed to improve the problems found in Latent Semantic Analysis (LSA). We have applied both LSA and PLSA in our system for grading essays written in Finnish, called Automatic Essay Assessor (AEA). We report the results comparing PLSA and LSA with three essay sets from various subjects. The methods were found to be almost equal in the accuracy measured by Spearman correlation between the grades given by the system and a human. Furthermore, we propose methods for improving the usage of PLSA in essay grading.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The main motivations behind developing automated essay assessment systems are to decrease the time in which students get feedback for their writings, and to reduce the costs of grading. The assumption in most of the systems is that the grades given by the human assessors describe the true quality of an essay. Thus, the aim of the systems is to \"simulate\" the grading process of a human grader and a system is usable only if it is able to perform the grading as accurately as human raters. An automated assessment system is not affected by errors caused by lack of consistency, fatigue or bias, thus it can help achieving better accuracy and objectivity of assessment (Page and Petersen, 1995) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 669, |
|
"end": 694, |
|
"text": "(Page and Petersen, 1995)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There has been research on automatic essay grading since the 1960s. The earliest systems, such as PEG (Page and Petersen, 1995) , based their grading on the surface information from the essay. For example, the number of words and commas were counted in order to determine the quality of the essays (Page, 1966) . Although these kinds of systems performed considerably well, they also received heavy criticism (Page and Petersen, 1995) . Some researchers consider the use of natural language as a feature for human intelligence (Hearst et al., 2000) and writing as a method to express the intelligence. Based on that assumption, taking the surface information into account and ignoring the meanings of the content is insufficient. Recent systems and studies, such as e-rater (Burstein, 2003) and approaches based on LSA (Landauer et al., 1998) , have focused on developing the methods which determine the quality of the essays with more analytic measures such as syntactic and semantic structure of the essays. At the same time in the 1990s, the progress of natural language processing and information retrieval techniques have given the opportunity to take also the meanings into account.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 127, |
|
"text": "(Page and Petersen, 1995)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 310, |
|
"text": "(Page, 1966)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 434, |
|
"text": "(Page and Petersen, 1995)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 527, |
|
"end": 548, |
|
"text": "(Hearst et al., 2000)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 790, |
|
"text": "(Burstein, 2003)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 819, |
|
"end": 842, |
|
"text": "(Landauer et al., 1998)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "LSA has produced promising results in content analysis of essays (Landauer et al., 1997; Foltz et al., 1999b) . Intelligent Essay Assessor (Foltz et al., 1999b) and Select-a-Kibitzer (Wiemer-Hastings and Graesser, 2000) apply LSA for assessing essays written in English. In Apex (Lemaire and Dessus, 2001) , LSA is applied to essays written in French. In addition to the essay assessment, LSA is applied to other educational applications. An intelligent tutoring system for providing help for students (Wiemer-Hastings et al., 1999) and Summary Street (Steinhart, 2000) , which is a system for assessing summaries, are some examples of other applications of LSA. To our knowledge, there is no system utilizing PLSA (Hofmann, 2001) for automated essay assessment or related tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 88, |
|
"text": "(Landauer et al., 1997;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 89, |
|
"end": 109, |
|
"text": "Foltz et al., 1999b)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 160, |
|
"text": "(Foltz et al., 1999b)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 165, |
|
"end": 219, |
|
"text": "Select-a-Kibitzer (Wiemer-Hastings and Graesser, 2000)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 305, |
|
"text": "(Lemaire and Dessus, 2001)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 532, |
|
"text": "(Wiemer-Hastings et al., 1999)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 552, |
|
"end": 569, |
|
"text": "(Steinhart, 2000)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 715, |
|
"end": 730, |
|
"text": "(Hofmann, 2001)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We have developed an essay grading system, Automatic Essay Assessor (AEA), to be used to analyze essay answers written in Finnish, although the system is designed in a way that it is not limited to only one language. It applies both course materials, such as passages from lecture notes and course textbooks covering the assignment-specific knowledge, and essays graded by humans to build the model for assessment. In this study, we employ both LSA and PLSA methods to determine the similarities between the essays and the comparison materials in order to determine the grades. We compare the accuracy of these methods by using the Spearman correlation between computer and human assigned grades.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The paper is organized as follows. Section 2 explains the architecture of AEA and the used grading methods. The experiment and results are discussed in Section 3. Conclusions and future work based on the experiment are presented in Section 4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We have developed a system for automated assessment of essays . In this section, we explain the basic architecture of the system and describe the methods used to analyze essays.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AEA System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "There are two approaches commonly used in the essay grading systems to determine the grade for the essay:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture of AEA", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "1. The essay to be graded is compared to the human-graded essays and the grade is based on the most similar essays' grades; or 2. The essay to be graded is compared to the essay topic related materials (e.g. textbook or model essays) and the grade is given based on the similarity to these materials.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture of AEA", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In our system, AEA , we have combined these two approaches. The rel-evant parts of the learning materials, such as chapters of a textbook, are used to train the system with assignment-specific knowledge. The approaches based on the comparison between the essays to be graded and the textbook have been introduced in (Landauer et al., 1997; Foltz et al., 1999a; Lemaire and Dessus, 2001; Hearst et al., 2000) , but have been usually found less accurate than the methods based on comparison to prescored essays. Our method attempts to overcome this by combining the use of course content and prescored essays. The essays to be graded are not directly compared to the prescored essays with for instance k-nearest neighbors method, but prescored essays are used to determine the similarity threshold values for grade categories as discussed below. Prescored essays can also be used to determine the optimal dimension for the reduced matrix in LSA as discussed in Kakkonen et al. (2005) . Figure 1 illustrates the grading process of our system. The texts to be analyzed are added into wordby-context matrix (WCM), representing the number of occurrences of each unique word in each of the contexts (e.g. documents, paragraphs or sentences). In WCM M , cell M ij contains the count of the word i occurrences in the context j. As the first step in analyzing the essays and course materials, the lemma of each word form occurring in the texts must be found. We have so far applied AEA only to essays written in Finnish. Finnish is morphologically more complex than English, and word forms are formed by adding suffixes into base forms. Because of that, base forms have to be used instead of inflectional forms when building the WCM, especially if a relatively small corpus is utilized. Furthermore, several words can become synonyms when suffixes are added to them, thus making the word sense disambiguation necessary. Hence, instead of just stripping suffixes, we apply a more sophisticated method, a morphological parser and disambiguator, namely Constraint Grammar parser for Finnish (FINCG) to produce the lemmas for each word (Lingsoft, 2005) . In addition, the most commonly occurring words (stopwords) are not included in the matrix, and only the words that appear in at least two contexts are added into the WCM (Landauer et al., 1998) . We also apply entropy-based term weighting in order to give higher values to words that are more important for the content and lower values to words with less importance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 339, |
|
"text": "(Landauer et al., 1997;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 360, |
|
"text": "Foltz et al., 1999a;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 386, |
|
"text": "Lemaire and Dessus, 2001;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 387, |
|
"end": 407, |
|
"text": "Hearst et al., 2000)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 959, |
|
"end": 981, |
|
"text": "Kakkonen et al. (2005)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 2122, |
|
"end": 2138, |
|
"text": "(Lingsoft, 2005)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2311, |
|
"end": 2334, |
|
"text": "(Landauer et al., 1998)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 984, |
|
"end": 992, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Architecture of AEA", |
|
"sec_num": "2.1" |
|
}, |
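
{

"text": "To make this step concrete, the following is a minimal sketch (not the actual AEA implementation) of building a WCM from pre-lemmatized, stopword-filtered token lists and applying one common log-entropy weighting variant; the exact weighting formula used in AEA is an assumption here:\n\nimport numpy as np\nfrom collections import Counter\n\ndef build_wcm(contexts):\n    # contexts: list of token lists (already lemmatized, stopwords removed)\n    df = Counter(w for c in contexts for w in set(c))   # context frequency of each word\n    vocab = sorted(w for w, n in df.items() if n >= 2)  # keep words appearing in >= 2 contexts\n    index = {w: i for i, w in enumerate(vocab)}\n    M = np.zeros((len(vocab), len(contexts)))\n    for j, c in enumerate(contexts):\n        for w in c:\n            if w in index:\n                M[index[w], j] += 1.0\n    return M, vocab\n\ndef log_entropy(M):\n    # local weight log(1 + count); global weight 1 + sum_j p_ij log p_ij / log(n contexts)\n    p = M / np.maximum(M.sum(axis=1, keepdims=True), 1e-12)\n    ent = np.where(p > 0.0, p * np.log(p), 0.0).sum(axis=1)\n    g = 1.0 + ent / np.log(M.shape[1])  # near 0 for evenly spread words, near 1 for specific ones\n    return np.log1p(M) * g[:, None]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture of AEA",

"sec_num": "2.1"

},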
|
{ |
|
"text": "First, the comparison materials based on the relevant textbook passages or other course materials are modified into machine readable form with the method described in the previous paragraph. The vector for each context in the comparison materials is marked with Y i . This WCM is used to create the model with LSA, PLSA or another information retrieval method. To compare the similarity of an essay to the course materials, a query vector X j of the same form as the vectors in the WCM is constructed. The query vector X j representing an essay is added or folded in into the model build with WCM with the method specific way discussed later. This foldedin queryX j is then compared to the model of each text passage\u1ef8 i in the comparison material by using a similarity measure to determine the similarity value. We have used the cosine of the angle between (X j , Y i ), to measure the similarity of two documents. The similarity score for an essay is calculated as the sum of the similarities between the essay and each of the textbook passages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture of AEA", |
|
"sec_num": "2.1" |
|
}, |
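
{

"text": "As a sketch of the scoring step, assuming the folded-in essay and passage representations are available as numpy vectors (the fold-in itself is method-specific and illustrated later):\n\nimport numpy as np\n\ndef cosine(a, b):\n    # Cosine of the angle between two vectors; 0.0 for zero-length vectors.\n    denom = np.linalg.norm(a) * np.linalg.norm(b)\n    return float(a @ b / denom) if denom > 0.0 else 0.0\n\ndef similarity_score(essay_vec, passage_vecs):\n    # Sum of cosine similarities between the essay and every comparison passage.\n    return sum(cosine(essay_vec, y) for y in passage_vecs)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture of AEA",

"sec_num": "2.1"

},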
|
{ |
|
"text": "The document vectors of manually graded essays are compared to the textbook passages, in order to determine the similarity scores between the essays and the course materials. Based on these measures, threshold values for the grade categories are defined as follows: the grade categories, g 1 , g 2 , . . . , g C , are associated with similarity value limits, l 1 , l 2 , . . . , l C+1 , where C is the number of grades, and l C+1 = \u221e and normally l 1 = 0 or \u2212\u221e. Other category limits l i , 2 \u2264 i \u2264 C, are defined as weighted averages of the similarity scores for essays belonging to grade categories g i and g i\u22121 . Other kinds of formulas to define the grade category limits can be also used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture of AEA", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The grade for each essay to be graded is then determined by calculating the similarity score between the essay and the textbook passages and comparing the similarity score to the threshold values defined in the previous phase. The similarity score S i of an essay d i is matched to the grade categories according to their limits in order to determine the correct grade category as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture of AEA", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "For each i, 1 \u2264 i \u2264 C, if l i < S i \u2264 l i+1 then d i \u2208 g i and break.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture of AEA", |
|
"sec_num": "2.1" |
|
}, |
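
{

"text": "The threshold construction and the grade assignment rule can be sketched as follows; the plain average of the adjacent categories' mean scores is used here as one possible instance of the weighted-average scheme, and the sketch assumes every grade occurs in the training data and that mean scores increase with the grade:\n\nimport numpy as np\n\ndef category_limits(train_scores, train_grades, n_grades):\n    # l_1 = -inf and l_{C+1} = +inf; each inner l_i is the average of the mean\n    # similarity scores of the adjacent grade categories g_{i-1} and g_i.\n    means = [np.mean([s for s, g in zip(train_scores, train_grades) if g == c])\n             for c in range(n_grades)]\n    inner = [(means[c - 1] + means[c]) / 2.0 for c in range(1, n_grades)]\n    return [-np.inf] + inner + [np.inf]\n\ndef assign_grade(score, limits):\n    # Returns the grade category i such that l_i < score <= l_{i+1}.\n    for i in range(len(limits) - 1):\n        if limits[i] < score <= limits[i + 1]:\n            return i",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture of AEA",

"sec_num": "2.1"

},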
|
{ |
|
"text": "Latent Semantic Analysis (LSA) (Landauer et al., 1998 ) is a corpus-based method used in information retrieval with vector space models. It provides a means of comparing the semantic similarity between the source and target texts. LSA has been successfully applied to automate giving grades and feedback on free-text responses in several systems as discussed in Section 1. The basic assumption behind LSA is that there is a close relationship between the meaning of a text and the words in that text. The power of LSA lies in the fact that it is able to map the essays with similar wordings closer to each other in the vector space. The LSA method is able to strengthen the similarity between two texts even when they do not contain common words. We describe briefly the technical details of the method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 53, |
|
"text": "(Landauer et al., 1998", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The essence of LSA is dimension reduction based on the singular value decomposition (SVD), an algebraic technique. SVD is a form of factor analysis, which reduces the dimensionality of the original WCM and thereby increases the dependency between contexts and words (Landauer et al., 1998) . SVD is defined as X = T 0 S 0 D 0 T , where X is the preprocessed WCM and T 0 and D 0 are orthonormal matrices representing the words and the contexts. S 0 is a diagonal matrix with singular values. In the dimension reduction, the k highest singular values in S 0 are selected and the rest are ignored. With this operation, an approximation matrixX of the original matrix X is acquired. The aim of the dimension reduction is to reduce \"noise\" or unimportant details and to allow the underlying semantic structure to be-come evident (Deerwester et al., 1990) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 266, |
|
"end": 289, |
|
"text": "(Landauer et al., 1998)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 824, |
|
"end": 849, |
|
"text": "(Deerwester et al., 1990)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "2.2" |
|
}, |
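
{

"text": "A minimal numpy sketch of the decomposition and rank-k truncation described above:\n\nimport numpy as np\n\ndef lsa_model(X, k):\n    # Full SVD: X = T0 @ diag(S0) @ D0.T; numpy returns D0.T directly.\n    T0, S0, D0t = np.linalg.svd(X, full_matrices=False)\n    # Keep the k largest singular values and the matching columns/rows.\n    return T0[:, :k], S0[:k], D0t[:k, :]\n\n# T_k @ np.diag(S_k) @ D_kt reconstructs the rank-k approximation of X.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Latent Semantic Analysis",

"sec_num": "2.2"

},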
|
{ |
|
"text": "In information retrieval and essay grading, the queries or essays have to be folded in into the model in order to calculate the similarities between the documents in the model and the query. In LSA, the folding in can be achieved with a simple matrix multiplication:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "X q = X T q T 0 S \u22121 0 ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where X q is the term vector constructed from the query document with preprocessing, and T 0 and S 0 are the matrices from the SVD of the model after dimension reduction. The resulting vectorX q is in the same format as the documents in the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "2.2" |
|
}, |
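
{

"text": "Continuing the lsa_model sketch above, the fold-in is a single multiplication (an illustration, not AEA's code):\n\nimport numpy as np\n\ndef lsa_fold_in(x_q, T_k, S_k):\n    # x_q: preprocessed term vector of the query essay (length = vocabulary size).\n    # Returns the k-dimensional representation X_q^T T_0 S_0^{-1}.\n    return x_q @ T_k / S_k  # elementwise division applies the inverse of the diagonal S_0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Latent Semantic Analysis",

"sec_num": "2.2"

},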
|
{ |
|
"text": "The features that make LSA suitable for automated grading of essays can be summarized as follows. First, the method focuses on the content of the essay, not on the surface features or keywordbased content analysis. The second advantage is that LSA-based scoring can be performed with relatively low amount of human graded essays. Other methods, such as PEG and e-rater typically need several hundred essays to be able to form an assignmentspecific model (Shermis et al., 2001; Burstein and Marcu, 2000) whereas LSA-based IEA system has sometimes been calibrated with as few as 20 essays, though it typically needs more essays (Hearst et al., 2000) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 454, |
|
"end": 476, |
|
"text": "(Shermis et al., 2001;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 502, |
|
"text": "Burstein and Marcu, 2000)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 626, |
|
"end": 647, |
|
"text": "(Hearst et al., 2000)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Although LSA has been successfully applied in information retrieval and related fields, it has also received criticism (Hofmann, 2001; Blei et al., 2003) . The objective function determining the optimal decomposition in LSA is the Frobenius norm. This corresponds to an implicit additive Gaussian noise assumption on the counts and may be inadequate. This seems to be acceptable with small document collections but with large document collections it might have a negative effect. LSA does not define a properly normalized probability distribution and, even worse, the approximation matrix may contain negative entries meaning that a document contains negative number of certain words after the dimension reduction. Hence, it is impossible to treat LSA as a generative language model and moreover, the use of different similarity measures is limited. Furthermore, there is no obvious interpretation of the directions in the latent semantic space. This might have an effect if also feedback is given. Choosing the number of dimensions in LSA is typically based on an ad hoc heuristics. However, there is research done aiming to resolve the problem of dimension selection in LSA, especially in the essay grading domain (Kakkonen et al., 2005) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 134, |
|
"text": "(Hofmann, 2001;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 135, |
|
"end": 153, |
|
"text": "Blei et al., 2003)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1216, |
|
"end": 1239, |
|
"text": "(Kakkonen et al., 2005)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Probabilistic Latent Semantic Analysis (PLSA) (Hofmann, 2001 ) is based on a statistical model which has been called the aspect model. The aspect model is a latent variable model for co-occurrence data, which associates unobserved class variables z k , k \u2208 {1, 2, . . . , K} with each observation. In our settings, the observation is an occurrence of a word w j , j \u2208 {1, 2, . . . , M }, in a particular context d i , i \u2208 {1, 2, . . . , N }. The probabilities related to this model are defined as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 60, |
|
"text": "(Hofmann, 2001", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 P (d i ) denotes the probability that a word occurrence will be observed in a particular context d i ;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 P (w j |z k ) denotes the class-conditional probability of a specific word conditioned on the unobserved class variable z k ; and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 P (z k |d i ) denotes a context specific probability distribution over the latent variable space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
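
{

"text": "Together, these quantities define the aspect model's joint probability of observing word $w_j$ in context $d_i$, in the standard formulation of Hofmann (2001): $P(d_i, w_j) = P(d_i) \\sum_{k=1}^{K} P(w_j|z_k) P(z_k|d_i)$.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Probabilistic Latent Semantic Analysis",

"sec_num": "2.3"

},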
|
{ |
|
"text": "When using PLSA in essay grading or information retrieval, the first goal is to build up the model. In other words, approximate the probability mass functions with machine learning from the training data, in our case the comparison material consisting of assignment specific texts. Expectation Maximization (EM) algorithm can be used in the model building with maximum likelihood formulation of the learning task (Dempster et al., 1977) . In EM, the algorithm alternates between two steps: (i) an expectation (E) step where posterior probabilities are computed for the latent variables, based on the current estimates of the parameters, (ii) a maximization (M) step, where parameters are updated based on the loglikelihood which depends on the posterior probabilities computed in the E-step. The standard E-step is defined in equation (1).", |
|
"cite_spans": [ |
|
{ |
|
"start": 413, |
|
"end": 436, |
|
"text": "(Dempster et al., 1977)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (z k |d i , w j ) = P (w j |z k )P (z k |d i ) K l=1 P (w j |z l )P (z l |d i )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The M-step is formulated in equations (2) and 3as derived by Hofmann (2001) . These two steps are alternated until a termination condition is met, in this case, when the maximum likelihood function has converged.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 75, |
|
"text": "Hofmann (2001)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (w j |z k ) = N i=1 n(d i , w j )P (z k |d i , w j ) M m=1 N i=1 n(d i , w m )P (z k |d i , w m ) (2) P (z k |d i ) = M j=1 n(d i , w j )P (z k |d i , w j ) M m=1 n(d i , w m )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
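
{

"text": "A minimal numpy sketch of equations (1)-(3); the array shapes, random initialization and fixed iteration count are illustrative assumptions (in practice one iterates until the log-likelihood converges):\n\nimport numpy as np\n\ndef plsa_em(n, K, iters=100, seed=0):\n    # n: (N contexts, M words) count matrix; returns P(w|z), shape (K, M), and P(z|d), shape (N, K).\n    rng = np.random.default_rng(seed)\n    N, M = n.shape\n    p_w_z = rng.random((K, M)); p_w_z /= p_w_z.sum(axis=1, keepdims=True)\n    p_z_d = rng.random((N, K)); p_z_d /= p_z_d.sum(axis=1, keepdims=True)\n    for _ in range(iters):\n        # E-step, eq. (1): posterior P(z|d,w), shape (N, M, K), normalized over z.\n        post = p_z_d[:, None, :] * p_w_z.T[None, :, :]\n        post /= np.maximum(post.sum(axis=2, keepdims=True), 1e-12)\n        # M-step, eq. (2): P(w|z) proportional to sum_i n(d_i, w_j) P(z_k|d_i, w_j).\n        p_w_z = (n[:, :, None] * post).sum(axis=0).T\n        p_w_z /= np.maximum(p_w_z.sum(axis=1, keepdims=True), 1e-12)\n        # M-step, eq. (3): P(z|d) proportional to sum_j n(d_i, w_j) P(z_k|d_i, w_j).\n        p_z_d = (n[:, :, None] * post).sum(axis=1)\n        p_z_d /= np.maximum(p_z_d.sum(axis=1, keepdims=True), 1e-12)\n    return p_w_z, p_z_d",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Probabilistic Latent Semantic Analysis",

"sec_num": "2.3"

},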
|
{ |
|
"text": "Although standard EM algorithm can lead to good results, it may also overfit the model to the training data and perform poorly with unseen data. Furthermore, the algorithm is iterative and converges slowly, which can increase the runtime seriously. Hence, Hofmann (2001) proposes another approach called Tempered EM (TEM), which is a derivation of standard EM algorithm. In TEM, the M-step is the same as in EM, but a dampening parameter is introduced into the E-step as shown in equation 4. The parameter \u03b2 will dampen the posterior probabilities closer to uniform distribution, when \u03b2 < 1 and form the standard E-step when \u03b2 = 1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 256, |
|
"end": 270, |
|
"text": "Hofmann (2001)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "P (z k |d i , w j ) = (P (w j |z k )P (z k |d i )) \u03b2 K l=1 P (w j |z l )P (z l |d i ) \u03b2 (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Hofmann (2001) defines the TEM algorithm as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "1. Set \u03b2 := 1 and perform the standard EM with early stopping.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "2. Set \u03b2 := \u03b7\u03b2 (with \u03b7 < 1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "3. Repeat the E-and M-steps until the performance on hold-out data deteriorates, otherwise go to step 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "4. Stop the iteration when decreasing \u03b2 does not improve performance on hold-out data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
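
{

"text": "Relative to the plsa_em sketch above, TEM changes only the E-step; a hedged illustration of equation (4):\n\nimport numpy as np\n\ndef tempered_posterior(p_z_d, p_w_z, beta):\n    # Eq. (4): raise each product of factors to the power beta before normalizing;\n    # beta = 1 recovers the standard E-step, beta < 1 dampens towards uniform.\n    post = (p_z_d[:, None, :] * p_w_z.T[None, :, :]) ** beta\n    return post / np.maximum(post.sum(axis=2, keepdims=True), 1e-12)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Probabilistic Latent Semantic Analysis",

"sec_num": "2.3"

},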
|
{ |
|
"text": "Early stopping means that the optimization is not done until the model converges, but the iteration is stopped already once the performance on hold-out data degenerates. Hofmann (2001) proposes to use the perplexity to measure the generalization performance of the model and the stopping condition for the early stopping. The perplexity is defined as the log-averaged inverse probability on unseen data calculated as in equation 5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 184, |
|
"text": "Hofmann (2001)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P = exp \u2212 i,j n \u2032 (d i , w j ) log P (w j |d i ) i,j n \u2032 (d i , w j ) ,", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{

"text": "where $n'(d_i, w_j)$ is the count on hold-out or training data.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Probabilistic Latent Semantic Analysis",

"sec_num": "2.3"

},
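
{

"text": "A sketch of equation (5) on held-out counts; the mixture P(w_j|d_i) = sum_k P(w_j|z_k) P(z_k|d_i) used below follows from the aspect model:\n\nimport numpy as np\n\ndef perplexity(n_held_out, p_w_z, p_z_d):\n    # n_held_out: (N, M) counts; P(w|d) = P(z|d) @ P(w|z), shape (N, M).\n    p_w_d = p_z_d @ p_w_z\n    mask = n_held_out > 0\n    log_p = np.log(np.maximum(p_w_d, 1e-12))\n    return float(np.exp(-(n_held_out[mask] * log_p[mask]).sum() / n_held_out[mask].sum()))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Probabilistic Latent Semantic Analysis",

"sec_num": "2.3"

},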
|
{ |
|
"text": "In PLSA, the folding in is done by using TEM as well. The only difference when folding in a new document or query q outside the model is that just the probabilities P (z k |q) are updated during the Mstep and the P (w j |z k ) are kept as they are. The similarities between a document d i in the model and a query q folded in to the model can be calculated with the cosine of the angle between the vectors containing the probability distributions (P (z k |q)) K k=1 and (P (z k |d i )) K k=1 (Hofmann, 2001 ). PLSA, unlike LSA, defines proper probability distributions to the documents and has its basis in Statistics. It belongs to a framework called Latent Dirichlet Allocations (Girolami and Kab\u00e1n, 2003; Blei et al., 2003) , which gives a better grounding for this method. For instance, several probabilistic similarity measures can be used. PLSA is interpretable with its generative model, latent classes and illustrations in N -dimensional space (Hofmann, 2001 ). The latent classes or topics can be used to determine which part of the comparison materials the student has answered and which ones not.", |
|
"cite_spans": [ |
|
{ |
|
"start": 492, |
|
"end": 506, |
|
"text": "(Hofmann, 2001", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 681, |
|
"end": 707, |
|
"text": "(Girolami and Kab\u00e1n, 2003;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 708, |
|
"end": 726, |
|
"text": "Blei et al., 2003)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 952, |
|
"end": 966, |
|
"text": "(Hofmann, 2001", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
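
{

"text": "A sketch of the fold-in and the similarity computation, reusing a tempered_posterior-style update; only P(z|q) is re-estimated while P(w|z) stays fixed, and the iteration count is an illustrative assumption:\n\nimport numpy as np\n\ndef plsa_fold_in(n_q, p_w_z, beta=1.0, iters=50, seed=0):\n    # n_q: (M,) word counts of the query essay; returns P(z|q), shape (K,).\n    rng = np.random.default_rng(seed)\n    K = p_w_z.shape[0]\n    p_z_q = rng.random(K); p_z_q /= p_z_q.sum()\n    for _ in range(iters):\n        post = (p_z_q[None, :] * p_w_z.T) ** beta           # (M, K): eq. (4) for a single document\n        post /= np.maximum(post.sum(axis=1, keepdims=True), 1e-12)\n        p_z_q = (n_q[:, None] * post).sum(axis=0)           # M-step restricted to P(z|q)\n        p_z_q /= max(p_z_q.sum(), 1e-12)\n    return p_z_q\n\ndef plsa_similarity(p_z_q, p_z_d_i):\n    # Cosine of the angle between the topic distributions of the query and a document.\n    return float(p_z_q @ p_z_d_i / (np.linalg.norm(p_z_q) * np.linalg.norm(p_z_d_i)))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Probabilistic Latent Semantic Analysis",

"sec_num": "2.3"

},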
|
{ |
|
"text": "In empirical research conducted by Hofmann (2001), PLSA yielded equal or better results compared to LSA in the contexts of information retrieval. It was also shown that the accuracy of PLSA can increase when the number of latent variables is increased. Furthermore, the combination of several similarity scores (e.g. cosines of angles between two documents) from models with different number of latent variables also increases the overall accuracy. Therefore, the selection of the dimension is not as crucial as in LSA. The problem with PLSA is that the algorithm used to computate the model, EM or its variant, is probabilistic and can converge to a local maximum. However, according to Hofmann (2001) , this is not a problem since the differences between separate runs are small. Flaws in the generative model and the overfitting problem 1 Education 70 73 0-6 Textbook Paragraphs 26 2397 2 Education 70 73 0-6 Textbook Sentences 147 2397 3 Communications 42 45 0-4 Textbook Paragraphs 45 1583 4 Communications 42 45 0-4 Textbook Sentences 139 1583 5 Soft. Eng. 26 27 0-10 *) Paragraphs 27 965 6", |
|
"cite_spans": [ |
|
{ |
|
"start": 688, |
|
"end": 702, |
|
"text": "Hofmann (2001)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 840, |
|
"end": 1083, |
|
"text": "1 Education 70 73 0-6 Textbook Paragraphs 26 2397 2 Education 70 73 0-6 Textbook Sentences 147 2397 3 Communications 42 45 0-4 Textbook Paragraphs 45 1583 4 Communications 42 45 0-4 Textbook Sentences 139 1583 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Soft. Eng. 26 27 0-10 *) Sentences 105 965 Table 1 : The essay sets used in the experiment. *) Comparison materials were constructed from the course handout with teacher's comments included and transparencies represented to the students.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 50, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "have been discussed in Blei et al. (2003) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 41, |
|
"text": "Blei et al. (2003)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Latent Semantic Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "To analyze the performance of LSA and PLSA in the essay assessment, we performed an experiment using three essay sets collected from courses on education, marketing and software engineering. The information about the essay collections is shown in Table 1. Comparison materials were taken either from the course book or other course materials and selected by the lecturer of the course. Furthermore, the comparison materials used in each of these sets were divided with two methods, either into paragraphs or sentences. Thus, we run the experiment in total with six different configurations of materials. We used our implementations of LSA and PLSA methods as described in Section 2. With LSA, all the possible dimensions (i.e. from two to the number of passages in the comparison materials) were searched in order to find the dimension achieving the highest accuracy of scoring, measured as the correlation between the grades given by the system and the human assessor. There is no upper limit for the number of latent variables in PLSA models as there is for the dimensions in LSA. Thus, we applied the same range for the best dimension search to be fair in the comparison. Furthermore, a linear combination of similarity values from PLSA models (PLSA-C) with predefined numbers of latent variables K \u2208 {16, 32, 48, 64, 80, 96, 112, 128} was used just to analyze the proposed potential of the method as discussed in Section 2.3 and in (Hofmann, 2001) . When building up all the PLSA mod-els with TEM, we used 20 essays from the training set of the essay collections to determine the early stopping condition with perplexity of the model on unseen data as proposed by Hofmann (2001) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1304, |
|
"end": 1338, |
|
"text": "{16, 32, 48, 64, 80, 96, 112, 128}", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1436, |
|
"end": 1451, |
|
"text": "(Hofmann, 2001)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1668, |
|
"end": 1682, |
|
"text": "Hofmann (2001)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Procedure and Materials", |
|
"sec_num": "3.1" |
|
}, |
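
{

"text": "A sketch of the PLSA-C combination under the simplest reading of a linear combination (an unweighted mean over the listed model sizes; the helpers are the plsa_fold_in and plsa_similarity sketches above):\n\nimport numpy as np\n\ndef plsa_c_score(essay_counts, models):\n    # models: list of (p_w_z, passage_topic_vecs) pairs, one per latent-variable count K.\n    scores = []\n    for p_w_z, passage_topic_vecs in models:\n        p_z_q = plsa_fold_in(essay_counts, p_w_z)\n        scores.append(sum(plsa_similarity(p_z_q, p_z_d) for p_z_d in passage_topic_vecs))\n    return float(np.mean(scores))  # average the per-model similarity scores",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Procedure and Materials",

"sec_num": "3.1"

},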
|
{ |
|
"text": "The results of the experiment for all the three methods, LSA, PLSA and PLSA-C are shown in Table 2 . It contains the most accurate dimension (column dim.) measured by machine-human correlation in grading, the percentage of the same (same) and adjacent grades (adj.) compared to the human grader and the Spearman correlation (cor.) between the grades given by the human assessor and the system.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 98, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The results indicate that LSA outperforms both methods using PLSA. This is opposite to the results obtained by Hofmann (2001) in information retrieval. We believe this is due to the size of the document collection used to build up the model. In the experiments of Hofmann (2001) , it was much larger, 1000 to 3000 documents, while in our case the number of documents was between 25 and 150. However, the differences are quite small when using the comparison materials divided into sentences. Although all methods seem to be more accurate when the comparison materials are divided into sentences, PLSA based methods seem to gain more than LSA.", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 125, |
|
"text": "Hofmann (2001)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 278, |
|
"text": "Hofmann (2001)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In most cases, PLSA with the most accurate dimension and PLSA-C perform almost equally. This is also in contrast with the findings of Hofmann (2001) because in his experiments PLSA-C performed better than PLSA. This is probably also due to the small document sets used. Nevertheless, this means that finding the most accurate dimension is unnecessary, but it is enough to com- Table 2 : The results of the grading process with different methods.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 377, |
|
"end": 384, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "bine several dimensions' similarity values. In our case, it seems that linear combination of the similarity values is not the best option because the similarity values between essays and comparison materials decrease when the number of latent variables increases. A topic for a further study would be to analyze techniques to combine the similarity values in PLSA-C to obtain higher accuracy in essay grading. Furthermore, it seems that the best combination of dimensions in PLSA-C depends on the features of the document collection (e.g. number of passages in comparison materials or number of essays) used. Another topic of further research is how the combination of dimensions can be optimized for each essay set by using the collection specific features without the validation procedure proposed in Kakkonen et al. (2005) . Currently, we have not implemented a version of LSA that combines scores from several models but we will analyze the possibilities for that in future research. Nevertheless, LSA representations for different dimensions form a nested sequence because of the number of singular values taken to approximate the original matrix. This will make the model combination less effective with LSA. This is not true for statistical models, such as PLSA, because they can capture a larger variety of the possible decompositions and thus several models can actually complement each other (Hofmann, 2001 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 803, |
|
"end": 825, |
|
"text": "Kakkonen et al. (2005)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1402, |
|
"end": 1416, |
|
"text": "(Hofmann, 2001", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We have implemented a system to assess essays written in Finnish. In this paper, we report a new extension to the system for analyzing the essays with PLSA method. We have compared LSA and PLSA as methods for essay grading. When our re-sults are compared to the correlations between human and system grades reported in literature, we have achieved promising results with all methods. LSA was slightly better when compared to PLSAbased methods. As future research, we are going to analyze if there are better methods to combine the similarity scores from several models in the context of essay grading to increase the accuracy (Hofmann, 2001) . Another interesting topic is to combine LSA and PLSA to compliment each other.", |
|
"cite_spans": [ |
|
{ |
|
"start": 626, |
|
"end": 641, |
|
"text": "(Hofmann, 2001)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future Work and Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We used the cosine of the angle between the probability vectors as a measure of similarity in LSA and PLSA. Other methods are proposed to determine the similarities between probability distributions produced by PLSA (Girolami and Kab\u00e1n, 2003; Blei et al., 2003) . The effects of using these techniques will be compared in the future experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 216, |
|
"end": 242, |
|
"text": "(Girolami and Kab\u00e1n, 2003;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 261, |
|
"text": "Blei et al., 2003)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future Work and Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "If the PLSA models with different numbers of latent variables are not highly dependent on each other, this would allow us to analyze the reliability of the grades given by the system. This is not possible with LSA based methods as they are normally highly dependent on each other. However, this will need further work to examine all the potentials.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future Work and Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our future aim is to develop a semi-automatic essay assessment system . For determining the grades or giving feedback to the student, the system needs a method for comparing similarities between the texts. LSA and PLSA offer a feasible solution for the purpose. In order to achieve even more accurate grading, we can use some of the results and techniques developed for LSA and develop them further for both methods. We are currently working with an extension to our LSA model that uses standard validation methods for reducing automatically the irrelevant content informa-tion in LSA-based essay grading (Kakkonen et al., 2005) . In addition, we plan to continue the work with PLSA, since it, being a probabilistic model, introduces new possibilities, for instance, in similarity comparison and feedback giving.", |
|
"cite_spans": [ |
|
{ |
|
"start": 605, |
|
"end": 628, |
|
"text": "(Kakkonen et al., 2005)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future Work and Conclusion", |
|
"sec_num": "4" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Latent Dirichlet Allocation", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "J. of Machine Learning Research", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "993--1022", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Blei, A. Y. Ng, and M. I. Jordan. 2003. La- tent Dirichlet Allocation. J. of Machine Learning Re- search, 3:993-1022.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Benefits of modularity in an automated scoring system", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Burstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proc. of the Workshop on Using Toolsets and Architectures to Build NLP Systems, 18th Int'l Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Burstein and D. Marcu. 2000. Benefits of modularity in an automated scoring system. In Proc. of the Work- shop on Using Toolsets and Architectures to Build NLP Systems, 18th Int'l Conference on Computational Lin- guistics, Luxembourg.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The e-rater scoring engine: Automated essay scoring with natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Burstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Burstein. 2003. The e-rater scoring engine: Auto- mated essay scoring with natural language process- ing. In M. D. Shermis and J. Burstein, editors, Auto- mated essay scoring: A cross-disciplinary perspective. Lawrence Erlbaum Associates, Hillsdale, NJ.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Indexing By Latent Semantic Analysis", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Deerwester", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Dumais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Furnas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Landauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Harshman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "J. of the American Society for Information Science", |
|
"volume": "41", |
|
"issue": "", |
|
"pages": "391--407", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Deerwester, S. T. Dumais, G. W. Furnas, T. K. Lan- dauer, and R. Harshman. 1990. Indexing By Latent Semantic Analysis. J. of the American Society for In- formation Science, 41:391-407.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Maximum likelihood from incomplete data via the em algorithm", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Dempster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Laird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Rubin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "J. of the Royal Statistical Society", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "1--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. P. Dempster, N. M. Laird, and D. B. Rubin. 1977. Maximum likelihood from incomplete data via the em algorithm. J. of the Royal Statistical Society, 39:1-38.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Automated Essay Scoring: Applications to Educational Technology", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Foltz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Laham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Landauer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proc. of Wolrd Conf. Educational Multimedia, Hypermedia & Telecommunications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. W. Foltz, D. Laham, and T. K. Landauer. 1999a. Au- tomated Essay Scoring: Applications to Educational Technology. In Proc. of Wolrd Conf. Educational Mul- timedia, Hypermedia & Telecommunications, Seattle, USA.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The Intelligent Essay Assessor: Applications to Educational Technology. Interactive Multimedia Electronic J. of Computer-Enhanced Learning", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Foltz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Laham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Landauer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. W. Foltz, D. Laham, and T. K. Landauer. 1999b. The Intelligent Essay Assessor: Applications to Educational Technology. Interactive Multime- dia Electronic J. of Computer-Enhanced Learning, 1. http://imej.wfu.edu/articles/1999/ 2/04/index.asp (Accessed 3.4.2005).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "On an Equivalence between PLSI and LDA", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Girolami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kab\u00e1n", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proc. of the 26th Annual Int'l ACM SIGIR Conf. on Research and Development in Informaion Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "433--434", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Girolami and A. Kab\u00e1n. 2003. On an Equivalence be- tween PLSI and LDA. In Proc. of the 26th Annual Int'l ACM SIGIR Conf. on Research and Development in In- formaion Retrieval, pages 433-434, Toronto, Canada. ACM Press.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "The Debate on Automated Essay Grading", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Hearst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kukich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Light", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Hirschman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Burger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Breck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Ferro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Landauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Laham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Foltz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Calfee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "IEEE Intelligent Systems", |
|
"volume": "15", |
|
"issue": "", |
|
"pages": "22--37", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Hearst, K. Kukich, M. Light, L. Hirschman, J. Burger, E. Breck, L. Ferro, T. K. Landauer, D. Laham, P. W. Foltz, and R. Calfee. 2000. The Debate on Automated Essay Grading. IEEE Intelligent Systems, 15:22-37.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Unsupervised Learning by Probabilistic Latent Semantic Analysis", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Hofmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Machine Learning", |
|
"volume": "42", |
|
"issue": "", |
|
"pages": "177--196", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Hofmann. 2001. Unsupervised Learning by Proba- bilistic Latent Semantic Analysis. Machine Learning, 42:177-196.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Automatic Assessment of the Content of Essays Based on Course Materials", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kakkonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Sutinen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of the Int'l Conf. on Information Technology: Research and Education", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "126--130", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Kakkonen and E. Sutinen. 2004. Automatic As- sessment of the Content of Essays Based on Course Materials. In Proc. of the Int'l Conf. on Information Technology: Research and Education, pages 126-130, London, UK.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Semi-Automatic Evaluation Features in Computer-Assisted Essay Assessment", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kakkonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Myller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Sutinen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of the 7th IASTED Int'l Conf. on Computers and Advanced Technology in Education", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "456--461", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Kakkonen, N. Myller, and E. Sutinen. 2004. Semi- Automatic Evaluation Features in Computer-Assisted Essay Assessment. In Proc. of the 7th IASTED Int'l Conf. on Computers and Advanced Technology in Ed- ucation, pages 456-461, Kauai, Hawaii, USA.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Comparison of Dimension Reduction Methods for Automated Essay Grading", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kakkonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Myller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Sutinen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Timonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Kakkonen, N. Myller, E. Sutinen, and J. Timonen. 2005. Comparison of Dimension Reduction Methods for Automated Essay Grading. Submitted.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "How well can passage meaning be derived without using word order? A comparison of Latent Semantic Analysis and humans", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Landauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Laham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Rehder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schreiner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proc. of the 19th Annual Meeting of the Cognitive Science Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. K. Landauer, D. Laham, B. Rehder, and M. E. Schreiner. 1997. How well can passage meaning be derived without using word order? A comparison of Latent Semantic Analysis and humans. In Proc. of the 19th Annual Meeting of the Cognitive Science Society, Mawhwah, NJ. Erlbaum.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Introduction to latent semantic analysis. Discourse Processes", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Landauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Foltz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Laham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "25", |
|
"issue": "", |
|
"pages": "259--284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. K. Landauer, P. W. Foltz, and D. Laham. 1998. In- troduction to latent semantic analysis. Discourse Pro- cesses, 25:259-284.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A System to Assess the Semantic Content of Student Essays", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Lemaire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Dessus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "J. of Educational Computing Research", |
|
"volume": "24", |
|
"issue": "", |
|
"pages": "305--320", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Lemaire and P. Dessus. 2001. A System to Assess the Semantic Content of Student Essays. J. of Educational Computing Research, 24:305-320.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "The computer moves into essay grading", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Page", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Petersen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Phi Delta Kappan", |
|
"volume": "76", |
|
"issue": "", |
|
"pages": "561--565", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. B. Page and N. S. Petersen. 1995. The computer moves into essay grading. Phi Delta Kappan, 76:561- 565.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The imminence of grading essays by computer", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Page", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1966, |
|
"venue": "Phi Delta Kappan", |
|
"volume": "47", |
|
"issue": "", |
|
"pages": "238--243", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. B. Page. 1966. The imminence of grading essays by computer. Phi Delta Kappan, 47:238-243.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "On-line Grading of Student Essays: PEG goes on the World Wide Web. Assessment & Evaluation in Higher Education", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Shermis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Mzumara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Olson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Harrington", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "26", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. D. Shermis, H. R. Mzumara, J. Olson, and S. Harring- ton. 2001. On-line Grading of Student Essays: PEG goes on the World Wide Web. Assessment & Evalua- tion in Higher Education, 26:247.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Summary Street: an LSA Based Intelligent Tutoring System for Writing and Revising Summaries", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Steinhart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Steinhart. 2000. Summary Street: an LSA Based Intel- ligent Tutoring System for Writing and Revising Sum- maries. Ph.D. thesis, University of Colorado, Boulder, Colorado.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Select-a-Kibitzer: A computer tool that gives meaningful feedback on student compositions", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Wiemer-Hastings", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Graesser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Interactive Learning Environments", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "149--169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Wiemer-Hastings and A. Graesser. 2000. Select-a- Kibitzer: A computer tool that gives meaningful feed- back on student compositions. Interactive Learning Environments, 8:149-169.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Approximate natural language understanding for an intelligent tutor", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Wiemer-Hastings", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Wiemer-Hastings", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Graesser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proc. of the 12th Int'l Artificial Intelligence Research Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "172--176", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Wiemer-Hastings, K. Wiemer-Hastings, and A. Graesser. 1999. Approximate natural lan- guage understanding for an intelligent tutor. In Proc. of the 12th Int'l Artificial Intelligence Research Symposium, pages 172-176, Menlo Park, CA, USA.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "The grading process of AEA.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "Set LSA LSA LSA LSA PLSA PLSA PLSA PLSA PLSA-C PLSA-C PLSA-C No. dim. same adj.", |
|
"html": null, |
|
"content": "<table><tr><td/><td/><td>cor.</td><td>dim.</td><td>same</td><td>adj.</td><td>cor.</td><td>same</td><td>adj.</td><td>cor.</td></tr><tr><td>1</td><td>14</td><td>39.7 43.9 0.78</td><td>9</td><td>31.5</td><td>32.9</td><td>0.66</td><td>34.2</td><td>35.6</td><td>0.70</td></tr><tr><td>2</td><td colspan=\"2\">124 35.6 49.3 0.80</td><td>83</td><td>37.0</td><td>37.0</td><td>0.76</td><td>35.6</td><td>41.1</td><td>0.73</td></tr><tr><td>3</td><td>8</td><td>31.1 28.9 0.54</td><td>38</td><td>24.4</td><td>35.6</td><td>0.41</td><td>17.7</td><td>24.4</td><td>0.12</td></tr><tr><td>4</td><td>5</td><td>24.4 42.3 0.57</td><td>92</td><td>35.6</td><td>31.1</td><td>0.59</td><td>22.2</td><td>35.6</td><td>0.47</td></tr><tr><td>5</td><td>6</td><td>29.6 48.2 0.88</td><td>16</td><td>18.5</td><td>18.5</td><td>0.78</td><td>11.1</td><td>40.1</td><td>0.68</td></tr><tr><td>6</td><td>6</td><td>44.4 37.1 0.90</td><td>55</td><td>33.3</td><td>44.4</td><td>0.88</td><td>14.8</td><td>40.7</td><td>0.79</td></tr></table>", |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |