{
"paper_id": "C08-1023",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T12:24:26.799727Z"
},
"title": "Pedagogically Useful Extractive Summaries for Science Education",
"authors": [
{
"first": "Sebastian",
"middle": [],
"last": "De La Chica",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Colorado at Boulder",
"location": {}
},
"email": ""
},
{
"first": "Faisal",
"middle": [],
"last": "Ahmad",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Colorado at Boulder",
"location": {}
},
"email": ""
},
{
"first": "James",
"middle": [
"H"
],
"last": "Martin",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Colorado at Boulder",
"location": {}
},
"email": ""
},
{
"first": "Tamara",
"middle": [],
"last": "Sumner",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Colorado at Boulder",
"location": {}
},
"email": "[email protected]"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This paper describes the design and evaluation of an extractive summarizer for educational science content called COGENT. COGENT extends MEAD based on strategies elicited from an empirical study with science domain and instructional design experts. COGENT identifies sentences containing pedagogically relevant concepts for a specific science domain. The algorithms pursue a hybrid approach integrating both domain independent bottom-up sentence scoring features and domain-aware top-down features. Evaluation results indicate that COGENT outperforms existing summarizers and generates summaries that closely resemble those generated by human experts. COGENT concept inventories appear to also support the computational identification of student misconceptions about earthquakes and plate tectonics.",
"pdf_parse": {
"paper_id": "C08-1023",
"_pdf_hash": "",
"abstract": [
{
"text": "This paper describes the design and evaluation of an extractive summarizer for educational science content called COGENT. COGENT extends MEAD based on strategies elicited from an empirical study with science domain and instructional design experts. COGENT identifies sentences containing pedagogically relevant concepts for a specific science domain. The algorithms pursue a hybrid approach integrating both domain independent bottom-up sentence scoring features and domain-aware top-down features. Evaluation results indicate that COGENT outperforms existing summarizers and generates summaries that closely resemble those generated by human experts. COGENT concept inventories appear to also support the computational identification of student misconceptions about earthquakes and plate tectonics.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Multidocument summarization (MDS) research efforts have resulted in significant advancements in algorithm and system design (Mani, 2001 ). Many of these efforts have focused on summarizing news articles, but not significantly explored the research issues arising from processing educational content to support pedagogical applications. This paper describes our research into the application of MDS techniques to educational science content to generate pedagogically useful summaries.",
"cite_spans": [
{
"start": 124,
"end": 135,
"text": "(Mani, 2001",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Knowledge maps are graphical representations of domain information laid out as networks of nodes containing rich concept descriptions interconnected using a fixed set of relationship types (Holley and Dansereau, 1984) . Knowledge maps are a variant of the concept maps used to capture, assess, and track student knowledge in education research (Novak and Gowin, 1984) . Learning research indicates that knowledge maps may be useful cognitive scaffolds, helping users lacking domain expertise to understand the macro-level structure of an information space (O'Donnell et al., 2002) . Knowledge maps have emerged as an effective representation to generate conceptual browsers that help students navigate educational digital libraries, such as the Digital Library for Earth System Education (DLESE.org) (Butcher et al., 2006) . In addition, knowledge maps have proven useful for domain and instructional experts to capture domain knowledge from digital library resources and to analyze student understanding for the purposes of providing formative assessments (Ahmad et al., 2007) .",
"cite_spans": [
{
"start": 189,
"end": 217,
"text": "(Holley and Dansereau, 1984)",
"ref_id": "BIBREF6"
},
{
"start": 344,
"end": 367,
"text": "(Novak and Gowin, 1984)",
"ref_id": "BIBREF13"
},
{
"start": 556,
"end": 580,
"text": "(O'Donnell et al., 2002)",
"ref_id": "BIBREF14"
},
{
"start": 800,
"end": 822,
"text": "(Butcher et al., 2006)",
"ref_id": "BIBREF1"
},
{
"start": 1057,
"end": 1077,
"text": "(Ahmad et al., 2007)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Knowledge maps have proven useful both as representations of knowledge for assessment purposes and as learning resources for presentation to students. However, domain knowledge map construction by experts is an expensive knowledge engineering activity. In this paper, we describe our progress towards the automated generation of pedagogically useful extractive summaries from educational texts about a science domain. In the context of automated knowledge map generation, summary sentences correspond to concepts. While the detection of relationships between concepts is also part of our overall research agenda, this paper focuses solely on concept identification using MDS techniques. The remainder of this paper is organized as fol-lows. First, we review related work in the areas of automated concept extraction from texts and extractive summarization. We then describe the empirical study we have conducted to understand how domain and instructional design experts identify pedagogically important science concepts in educational digital library resources. Next, we provide a detailed description of the algorithms we have designed based on expert strategies elicited from our empirical study. We then present and discuss our evaluation results using automated summarization metrics and human judgments. Finally, we present our conclusions and future work in this area.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Our work is informed by efforts to automate the acquisition of ontology concepts from text. On-toLearn (Navigli and Velardi, 2004) extracts domain terminology from a collection of texts using a syntactic parse to identify candidate terms that are filtered based on domain relevance and connected using a semantic interpretation based on word sense disambiguation. The newly identified concepts and relationships are used to update an existing ontology. Knowledge Puzzle focuses on n-grams to produce candidate terms filtered based on term frequency in the input documents and on the number of relationships associated with a given term (Zouaq et al., 2007) . This approach leverages pattern extraction techniques to identify concepts and relationships. While these approaches produce ontologies useful for computational purposes, the identified concepts are of a very fine granularity and therefore may yield graphs not suitable for identifying student misconceptions or for presentation back to the student. Clustering by committee has also been used to discover concepts from a text by grouping terms into conceptually related clusters (Lin and Pantel, 2002) . The resulting clusters appear to be tightly related, but operate at a very fine level of granularity. Our approach focuses on sentences as units of knowledge to produce concise representations that may be useful both as computational objects and as learning resources to present back to the student. Therefore, extractive summarization research also informs our work.",
"cite_spans": [
{
"start": 103,
"end": 130,
"text": "(Navigli and Velardi, 2004)",
"ref_id": "BIBREF12"
},
{
"start": 636,
"end": 656,
"text": "(Zouaq et al., 2007)",
"ref_id": "BIBREF19"
},
{
"start": 1138,
"end": 1160,
"text": "(Lin and Pantel, 2002)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Topic representation and topic themes have been used to explore promising MDS techniques (Harabagiu and Lacatusu, 2005) . Recent efforts in graph-based MDS have integrated sentence affinity, information richness and diversity penalties to produce very promising results (Wan and Yang, 2006) . Finally, MEAD is a widely used multi-document summarization and evaluation platform (Radev et al., 2000) . MEAD research efforts have resulted in significant contributions to support the development of summarization applications (Radev et al., 2000) . While all these systems have produced promising results in automated evaluations, none have directly targeted educational content as input or the generation of pedagogically useful summaries. We are directly building upon MEAD due its focus on sentence extraction and its high degree of modularization.",
"cite_spans": [
{
"start": 89,
"end": 119,
"text": "(Harabagiu and Lacatusu, 2005)",
"ref_id": "BIBREF3"
},
{
"start": 270,
"end": 290,
"text": "(Wan and Yang, 2006)",
"ref_id": "BIBREF18"
},
{
"start": 377,
"end": 397,
"text": "(Radev et al., 2000)",
"ref_id": "BIBREF16"
},
{
"start": 522,
"end": 542,
"text": "(Radev et al., 2000)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "We have conducted a study to capture how human experts construct and use knowledge maps. In this 10-month study, we examined how experts created knowledge maps from educational digital libraries and how they used the maps to assess student work and provide personalized feedback.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Empirical Study",
"sec_num": "3"
},
{
"text": "In this paper, we are focusing on the knowledge map construction aspects of the study. Four geology and instructional design experts collaboratively selected 20 resources from DLESE to construct a domain knowledge map on earthquakes and plates tectonics for high school age learners. The experts independently created knowledge maps of individual resources which they collaboratively merged into the final domain knowledge map in a one-day workshop. The resulting domain knowledge map consisted of 564 nodes containing domain concepts and 578 relationships. The concepts consist of 7,846 words, or 5% of the total number of words in the original resources. Figure 1 shows a fragment of the domain knowledge map created by our experts. Experts created nodes containing concepts of varying granularity, including nouns, noun phrases, partial sentences, single sentences, and multiple sentences. Our analysis of this domain knowledge map indicates that experts relied on copying-and-pasting (58%) and paraphrasing (37%) to create most domain concepts. Only 5% of the nodes could not be traced directly to the original resources.",
"cite_spans": [],
"ref_spans": [
{
"start": 657,
"end": 665,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Empirical Study",
"sec_num": "3"
},
{
"text": "Experts used relationship types in a Zipf-like distribution with the top 10 relationship types accounting for 64% of all relationships. The top 2 relationship types each accounted for more than 10% of all relationships: elaborations (19% or 110 links) and examples (14% or 78 links).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Empirical Study",
"sec_num": "3"
},
{
"text": "We have established the completeness of this domain knowledge map by asking a domain expert to assess its content coverage of nationallyrecognized educational goals on earthquakes and plate tectonics for high school age learners using the American Association for the Advancement of Science (AAAS) Benchmarks (Project 2061 (Project , 1993 . The results indicate adequate content coverage of the relevant AAAS Benchmarks achieved through 82 of the concepts (15%) with the remaining 482 concepts (85%) providing very detailed elaborations of the associated learning goals.",
"cite_spans": [
{
"start": 309,
"end": 322,
"text": "(Project 2061",
"ref_id": null
},
{
"start": 323,
"end": 338,
"text": "(Project , 1993",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Empirical Study",
"sec_num": "3"
},
{
"text": "Qualitative analysis of the verbal protocols captured during the study indicates that all experts used external sources to construct the domain knowledge map. Experts made references to their own knowledge (e.g., \"I know that\u2026\"), to content learned or taught in geology courses, to other resources used in the study, and to the National Science Education Standards (NSES), a comprehensive collection of nationallyrecognized science learning goals for K-12 students (National Research Council, 1996) .",
"cite_spans": [
{
"start": 465,
"end": 498,
"text": "(National Research Council, 1996)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Empirical Study",
"sec_num": "3"
},
{
"text": "We have examined sentence extraction agreement between experts using a kappa measure that accounts for prevalence of judgments and conflicting biases amongst experts, called PABAkappa (Byrt et al., 1993) . The average PABAkappa value of 0.62 indicates that our experts substantially agree on sentence extraction from digital library resources. While this study was not designed as an annotation project to support summarization evaluation, this level of agreement indicates that the concepts selected by the experts may serve as the reference summary to evaluate the performance of our summarizer.",
"cite_spans": [
{
"start": 184,
"end": 203,
"text": "(Byrt et al., 1993)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Empirical Study",
"sec_num": "3"
},
{
"text": "Creating a knowledge map from a collection of input texts involves identifying sentences containing important domain concepts, linking con-cepts, and labeling those links. This paper focuses solely on identifying and extracting pedagogically relevant sentences as domain concepts. We have designed and implemented an extractive summarizer for educational science content, called COGENT, based on MEAD version 3.11 (Radev et al., 2000) . COGENT processes a collection of educational digital library resources by first preprocessing each resource using Tidy (tidy.sourceforge.net) to fix improperly formatted HTML code. COGENT then merges multiple web pages into a single HTML document and extracts the contents of each resource into a plain text file. We have extended MEAD with sentence scoring features based on domain content, document structure, and sentence length.",
"cite_spans": [
{
"start": 414,
"end": 434,
"text": "(Radev et al., 2000)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Summarizer for Science Education",
"sec_num": "4"
},
{
"text": "We have designed two sentence-scoring features that aim to capture the domain content relevance of each sentence: the educational standards feature and the gazetteer feature.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Domain Content",
"sec_num": "4.1"
},
{
"text": "We have developed a feature that models how human experts used external sources to identify and extract concepts. The educational standards feature uses the textual description of the relevant AAAS Benchmarks on earthquakes and plate tectonics for high-school age learners and the associated NSES. Each sentence receives a score based on its similarity to the text contents of the learning goals and educational standards computed using a TFIDF (Term Frequency-Inverse Document Frequency) approach (Salton and Buckley, 1988) . We have used KinoSearch, a Perl implementation of the Lucene search engine (lucene.apache.org), to create an index that includes the AAAS Benchmarks learning goal description (boosted by 2), subject (boosted by 8), and keywords (boosted by 2), plus the text of the associated national standards (not boosted). Sentence scores are based on the similarity scores generated by KinoSearch in response to a query consisting of the sentence text.",
"cite_spans": [
{
"start": 498,
"end": 524,
"text": "(Salton and Buckley, 1988)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Domain Content",
"sec_num": "4.1"
},
{
"text": "To account for the large number of examples used by the experts in the domain knowledge map (14% of all links), we have developed a feature that reflects the number and relevance of the geographical names in each sentence. Earth science examples often refer to names of geographical places, including geological formations on the planet. The gazetteer feature leverages the Alexandria Digital Library (ADL) Gazetteer service (Hill, 2000) to check whether named entities identified in each sentence match entries in the ADL Gazetteer. A gazetteer is a georeferencing resource containing information about locations and place-names, including latitude and longitude as well as type information about the corresponding geographical feature. Each sentence receives a score based on a TFIDF approach where the TF is the number of times a particular location name appears in the sentence and the IDF is the inverse of the count of gazetteer entries matching the location name. If the ADL Gazetteer returns a large number of results for a given place-name, it means there are many geographical locations identified by that name. Our assumption is that unique names may be more pedagogically relevant. For example, Ohio receives an IDF score of 0.0625 because the ADL Gazetteer contains 16 entries so named, while the Mid-Atlantic Ridge, the distinctive underwater mountain range dividing the Atlantic Ocean, receives a score of 1.0 as it appears only once.",
"cite_spans": [
{
"start": 425,
"end": 437,
"text": "(Hill, 2000)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Domain Content",
"sec_num": "4.1"
},
{
"text": "Based on the intuition that the HTML structure of a web site reflects content relevancy, we have developed the hypertext feature. The hypertext feature assigns a higher score to sentences contained under higher level HTML headings.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Document Structure",
"sec_num": "4.2"
},
{
"text": "1/1 = 1.00 H2 1/2 = 0.50 H3 1/3 = 0.33 H4 1/4 = 0.25 H5 1/5 = 0.20 H6 1/6 = 0.17 where heading_bonus is obtained from Table 1 , par_no is the paragraph number within the heading, and sent_no is the sentence number within the paragraph. We use the 4 1 x function to attenuate the contributions to the feature score of later paragraphs and sentences. Initially, we used the same function MEAD uses to modulate its position feature ( 2 1 x ), but initial experimenta-tion indicated this function decayed too rapidly, resulting in later sentences being over-penalized.",
"cite_spans": [],
"ref_spans": [
{
"start": 118,
"end": 125,
"text": "Table 1",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Bonus H1",
"sec_num": null
},
{
"text": "To promote the extraction of sentences containing scientific concepts, we have developed the content word density feature. This feature makes a cut-off decision based on the ratio of content words to function words in a sentence. The content word density feature uses a pre-populated list of function words (a stopword list) to calculate the ratio of content to function words within each sentence, keeping sentences that meet or exceed the ratio of 50%. This cut-off value implies that the extracted sentences contain relatively more content words than function words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sentence Length",
"sec_num": "4.3"
},
{
"text": "We compute the final score of each sentence by adding the scores obtained for the MEAD default configuration features (centroid and position) to the scores for the COGENT features (educational standards, gazetteer, and hypertext). After the sentences have been sorted according to their cumulative scores, we keep sentences that pass the cut-off constraints, including the MEAD length feature equal or greater than 9 and CO-GENT content word density equal or greater than 50%. We use the MEAD cosine re-ranker to eliminate redundant sentences based on a cutoff similarity value of 0.7. Since human experts used only 5% of the total word count in the resources, we have configured MEAD to use a 5% word compression rate.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sentence Scoring and Selection",
"sec_num": "4.4"
},
{
"text": "We have evaluated COGENT by processing the 20 digital library resources used in the empirical study and comparing its output against the concepts identified by the experts.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5"
},
{
"text": "To assess the quality of the generated summaries, we have examined three configurations: Random, Default, and COGENT. The Random configuration extracts a random collection of sentences from the input texts. The Default configuration uses the MEAD default centroid, position and length (cut-off value of 9) sentence scoring features. Finally, the COGENT configuration includes the MEAD default features and the CO-GENT features. The Default and COGENT configurations use the MEAD cosine function with a threshold of 0.7 to eliminate redundant sen-tences. All three configurations use a word compression factor of 5% resulting in summaries of very similar length. For this evaluation, we leverage ROUGE (Lin and Hovy, 2003) to address the relative quality of the generated summaries based on common ngram counts and longest common subsequence (LCS). We report on ROUGE-1 (unigrams), ROUGE-2 (bigrams), ROUGE W-1.2 (weighted LCS), and ROUGE-S* (skip bigrams) as they appear to correlate well with human judgments for longer multi-document summaries, particularly ROUGE-1 (Lin, 2004) . Table 2 shows the results of this ROUGE-based evaluation including recall (R), precision (P), and balanced fmeasure (F). COGENT consistently outperforms the Random and Default baselines based on all four reported ROUGE measures. Given that much of the original research efforts on MEAD have centered on news articles, this result is not surprising. Pedagogical content, such as the educational digital library resources used in our work, differs in rhetorical intent, structure and terminology from the news articles leveraged by the MEAD researchers. However, the COGENT features described here are complementary to the default MEAD configuration. COGENT can best be characterized as a hybrid MDS, integrating bottom-up (centroid, position, length, hypertext, and content word density) and top-down (educational standards and gazetteer) sentence scoring features. \nThis hybrid approach reflects our findings from observing expert behaviors for identifying concepts from educational digital library resources. We believe the overall improvement in quality scores may be due to the COGENT features targeting different dimensions of what con-stitutes a pedagogically effective summary than the default MEAD features.",
"cite_spans": [
{
"start": 701,
"end": 721,
"text": "(Lin and Hovy, 2003)",
"ref_id": "BIBREF8"
},
{
"start": 1068,
"end": 1079,
"text": "(Lin, 2004)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [
{
"start": 1082,
"end": 1089,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Quality",
"sec_num": "5.1"
},
{
"text": "To characterize the COGENT summary contents, one of our research team members manually generated a summary corresponding to the best case for an extractive summarizer. This Best Case summary comprises the sentences from the digital library resources that align to the concepts selected by the human experts in our empirical study. Since the experts created concepts of varying granularity, this alignment produces the list of sentences that the experts would have produced if they had only selected single sentences to create concepts for their domain knowledge map. This summary comprises 621 sentences consisting of 13,116 words, or about a 9% word compression.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quality",
"sec_num": "5.1"
},
{
"text": "For this aspect of the evaluation, we have used ROUGE-L, an LCS metric computed using ROUGE. The ROUGE-L computation examines the union LCS between each reference sentence and all the sentences in the candidate summary. We believe this metric may be well-suited to reflect the degree of linguistic surface structure similarity between summaries. We postulate that ROUGE-L may be able to account for the explicitly copy-pasted concepts and to detect the more subtle similarities with paraphrased concepts in the expert-generated domain knowledge map. We have also used the content-based evaluation capabilities of MEAD to report on a cosine measure to capture similarity between the candidate summaries and the reference. Table 3 shows the results of this aspect of the evaluation including recall (R), precision (P), and balanced fmeasure (F). Table 3 . Content-based evaluation results (word compression in parentheses)",
"cite_spans": [],
"ref_spans": [
{
"start": 721,
"end": 728,
"text": "Table 3",
"ref_id": null
},
{
"start": 844,
"end": 851,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Quality",
"sec_num": "5.1"
},
{
"text": "COGENT consistently outperforms the Random and Default baselines on both the ROUGE-L and cosine measures. Given the cosine value of 0.8325, it appears COGENT extracts sentences containing similar terms in very similar frequency distribution as the experts.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Random",
"sec_num": null
},
{
"text": "The ROUGE-L scores also consistently indicate that the COGENT summary may be closer to the reference summary in relative word order-ing than either the Random or Default configurations. However, the scores for the Best Case summary reveal two interesting points. First, the ROUGE-L recall score for COGENT (R=0. 6021) is lower than that obtained by the Best Case summary (R=0.9669), meaning our summarizer appears to be extracting different sentences than those selected by the experts. Given the high cosine similarity with the reference summary (0.8325), we hypothesize that CO-GENT may be selecting sentences that cover very similar concepts to those selected by the experts only expressed differently. Second, we would have expected the ROUGE-L precision score for the Best Case configuration to be closer to 1.0. Instead, the Best Case precision score is 0.6256, only a minor improvement over CO-GENT (P=0.5982). Since the sentences in the Best Case summary come directly from the digital library resources, we hypothesize that experts may have used extensive linguistic transformations for paraphrased concepts, resulting in structures that ROUGE-L could not identify as similar.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Random",
"sec_num": null
},
{
"text": "Given the difference in word compression for the Best Case summary, we have performed an incremental analysis using the ROUGE-L measure shown in Figure 2 . This graph shows improved COGENT performance in ROUGE-L recall as the length of the summary increases, while both precision and fmeasure degrade. COGENT can match the recall scores of the Best Case summary (R=0.9669) by making the generated summary longer (30% word compression rate or 32,619 words), but the precision would suffer a sizeable decay (P=0.1558). For educational applications, more comprehensive concept inventories (longer summaries) may be better suited for computational purposes, such as pedagogical reasoning about student understanding, while more succinct inventories (shorter summaries) may be more appropriate for display to the student.",
"cite_spans": [],
"ref_spans": [
{
"start": 145,
"end": 153,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Random",
"sec_num": null
},
{
"text": "We have evaluated COGENT's pedagogical utility in the context of computationally identifying student scientific misconceptions. We have developed algorithms that reliably detect incorrect statements in student essays by comparing an expert-created domain knowledge map to an expert-created knowledge map of an essay. These algorithms use textual entailment techniques based on a shallow linguistic analysis of knowledge map concepts to identify sentences that contradict concepts in the domain knowledge map. Initial evaluation results indicate that these algorithms identify incorrect statements nearly as adeptly as human experts. Table 4 . Incorrect statement identification evaluation results",
"cite_spans": [],
"ref_spans": [
{
"start": 633,
"end": 640,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Pedagogical Utility",
"sec_num": "5.2"
},
{
"text": "As shown in Table 4 , the algorithms detect 87% of all incorrect statements identified by experts and 57% of the reported incorrect statements agree with human judgments on the same task. By comparison, experts show 69% overlap on average along both dimensions. Introducing the COGENT concept inventory in place of the expert-created domain knowledge map improves recall performance, as the algorithms return 93% of all incorrect statements reported by the experts, while preserving 57% precision. These results indicate that the generated summary covers the necessary pedagogical concepts to computationally identify student scientific misconceptions.",
"cite_spans": [],
"ref_spans": [
{
"start": 12,
"end": 19,
"text": "Table 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Manual",
"sec_num": null
},
{
"text": "Informal sampling of the sentences selected by COGENT shows the following three important science concepts receiving the highest scores: 1. Earthquakes are the result of forces deep within the Earth's interior that continuously affect the surface of the Earth. 2. Scientists believed that the movement of the Earth's plates bends and squeezes the rocks at the edges of the plates. 3. In particular, four major scientific developments spurred the formulation of the plate-tectonics theory: (1) demonstration of the ruggedness and youth of the ocean floor;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Manual",
"sec_num": null
},
{
"text": "(2) confirmation of repeated reversals of the Earth magnetic field in the geologic past;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Manual",
"sec_num": null
},
{
"text": "(3) emergence of the seafloor-spreading hypothesis and associated recycling of oceanic crust; and (4) precise documentation that the world's earthquake and volcanic activity is concentrated along oceanic trenches and submarine mountain ranges. For a more rigorous analysis of the pedagogical utility of the COGENT concepts, we asked an instructional expert with domain expertise in geology to evaluate the 326 sentences returned by COGENT. The expert used a 5-point Likert scale to judge whether each concept would be pedagogically useful in the context of a concept inventory on earthquakes and plate tectonics knowledge for high school age learners. The expert agreed or strongly agreed that 60% of the sentences would be pedagogically useful, with 30% of the sentences being potentially useful and only 10% of the sentences being judged as not useful. These results indicate that COGENT appears to perform quite well at identifying sentences that contain information relevant for learning about the domain.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Manual",
"sec_num": null
},
{
"text": "We have also completed an ablation study to identify the relative contribution of the COGENT features to the quality of the summary. We have focused on the cosine metric to capture the overall similarity between the COGENT concept inventory and the concepts from the expert-created knowledge map. Table 5 . Feature ablation evaluation results for COGENT Table 5 shows the cosine similarity between the concept inventory generated after taking the feature shown in parentheses out of the summarizer. The results are ordered from low-to-high such that the feature contributing the most to the all-features cosine score appears at the top of the table. Removing either the gazetteer or the hypertext feature causes the largest drops in similarity indicating the importance of the use of examples and the relevance of document structure for the quality of the COGENT-generated summary. Meanwhile both the educational standards and content word density appear to provide modest but useful improvements to the quality of the COGENT summary.",
"cite_spans": [],
"ref_spans": [
{
"start": 297,
"end": 304,
"text": "Table 5",
"ref_id": null
},
{
"start": 354,
"end": 361,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Manual",
"sec_num": null
},
{
"text": "Given that our algorithms have only been evaluated on the topic of earthquakes and plate tectonics for high school age learners, COGENT may be limited in its ability to transcend domains due to its reliance on two domain-aware sentence scoring features: educational standards and gazetteer. However, the educational standards feature may be applicable across other science topics because the AAAS Benchmarks and NSES provide very thorough and detailed coverage of a wide range of topics for the Science, Technology, Engineering, and Math disciplines for grades K-12. Only the gazetteer feature would need to be replaced, especially given its significant contribution to the quality of the generated summary as indicated by the results of the ablation study. We believe these results highlight the need to generalize our approach, perhaps using a classifier for identifying examples in educational texts without resorting to overly domain-specific language resources, such as the ADL Gazetteer. Overall, the evaluation results indicate that our approach holds promise for effectively identifying concepts for inclusion in the construction of a pedagogically useful domain knowledge map from educational science content.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Manual",
"sec_num": null
},
{
"text": "In this paper, we have presented a multidocument summarization system, COGENT, that integrates bottom-up and top-down sentence scoring features to identify pedagogically relevant concepts from educational digital library resources. Our results indicate that COGENT generates concept inventories that resemble those identified by experts and outperforms existing multi-document summarization systems. We have also used the COGENT concept inventory as input to our misconception identification algorithms and the evaluation results indicate the algorithms perform as well as when using an expert-created domain knowledge map. In the context of generating domain knowledge maps, our next step is to explore how machine learning techniques may be employed to connect concepts with links.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "6"
},
{
"text": "Automating the process of creating inventories of important pedagogical concepts represents an important step towards creating scalable intelligent learning and tutoring systems. We hope our progress in this direction may contribute to increase the interest within the computational linguistics research community in novel educational technology research.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "6"
},
{
"text": "\u00a9 2008. Licensed under the Creative Commons Attribution-Noncommercial-Share Alike 3.0 Unported license (http://creativecommons.org/licenses/by-nc-sa/3.0/). Some rights reserved.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "This research is funded in part by the National Science Foundation under NSF IIS/ALT Award 0537194. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the NSF.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Towards automatic conceptual personalization tools",
"authors": [
{
"first": "F",
"middle": [],
"last": "Ahmad",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "De La Chica",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Butcher",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Sumner",
"suffix": ""
},
{
"first": "J",
"middle": [
"H"
],
"last": "Martin",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 7th ACM/IEEE-CS Joint Conference on Digital Libraries",
"volume": "",
"issue": "",
"pages": "452--461",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ahmad, F., de la Chica, S., Butcher, K., Sumner, T. and Martin, J.H. (2007, June 17-23). Towards automatic conceptual personalization tools. In Pro- ceedings of the 7th ACM/IEEE-CS Joint Confer- ence on Digital Libraries, (Vancouver, British Co- lumbia, Canada, 2007), pages 452 -461.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Multimedia displays for conceptual discovery: information seeking with strand maps",
"authors": [
{
"first": "K",
"middle": [
"R"
],
"last": "Butcher",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Bhushan",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Sumner",
"suffix": ""
}
],
"year": 2006,
"venue": "ACM Multimedia Systems",
"volume": "11",
"issue": "3",
"pages": "236--248",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Butcher, K.R., Bhushan, S. and Sumner, T. (2006). Multimedia displays for conceptual discovery: in- formation seeking with strand maps. ACM Multi- media Systems, 11 (3), pages 236-248.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Bias, prevalence, and kappa",
"authors": [
{
"first": "T",
"middle": [],
"last": "Byrt",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Bishop",
"suffix": ""
},
{
"first": "J",
"middle": [
"B"
],
"last": "Carlin",
"suffix": ""
}
],
"year": 1993,
"venue": "Journal of Clinical Epidemiology",
"volume": "46",
"issue": "5",
"pages": "423--429",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Byrt, T., Bishop, J. and Carlin, J.B. (1993). Bias, prevalence, and kappa. Journal of Clinical Epide- miology, 46 (5), pages 423-429.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Topic themes for multi-document summarization",
"authors": [
{
"first": "S",
"middle": [],
"last": "Harabagiu",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Lacatusu",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the 28th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval",
"volume": "",
"issue": "",
"pages": "202--209",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Harabagiu, S. and Lacatusu, F. (2005, August 15-19). Topic themes for multi-document summarization. In Proceedings of the 28th Annual International ACM SIGIR Conference on Research and Devel- opment in Information Retrieval, (Salvador, Brazil, 2005), pages 202-209.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Summarizing large document sets using concept-based clustering",
"authors": [
{
"first": "H",
"middle": [],
"last": "Hardy",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Shimizu",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Strzalkowski",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Ting",
"suffix": ""
},
{
"first": "G",
"middle": [
"B"
],
"last": "Wise",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "Zhang",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the Human Language Technology Conference",
"volume": "",
"issue": "",
"pages": "222--227",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hardy, H., Shimizu, N., Strzalkowski, T., Ting, L., Wise, G.B. and Zhang, X. (2002). Summarizing large document sets using concept-based cluster- ing. In Proceedings of the Human Language Tech- nology Conference 2002, (San Diego, California, United States, 2002), pages 222-227.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Core elements of digital gazetteers: placenames, categories, and footprints",
"authors": [
{
"first": "L",
"middle": [
"L"
],
"last": "Hill",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the 4th European Conference on Digital Libraries",
"volume": "",
"issue": "",
"pages": "280--290",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hill, L.L. (2000, September 18-20). Core elements of digital gazetteers: placenames, categories, and footprints. In Proceedings of the 4th European Conference on Digital Libraries, (Lisbon, Portugal, 2000), pages 280-290.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Spatial learning strategies: Techniques, applications, and related issues",
"authors": [
{
"first": "C",
"middle": [
"D"
],
"last": "Holley",
"suffix": ""
},
{
"first": "D",
"middle": [
"F"
],
"last": "Dansereau",
"suffix": ""
}
],
"year": 1984,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Holley, C.D. and Dansereau, D.F. (1984). Spatial learning strategies: Techniques, applications, and related issues. Academic Press, Orlando, Florida.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "ROUGE: A package for automatic evaluation of summaries",
"authors": [
{
"first": "C",
"middle": [
"Y"
],
"last": "Lin",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of the Workshop on Text Summarization Branches Out",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lin, C.Y. (2004). ROUGE: A package for automatic evaluation of summaries. In Proceedings of the Workshop on Text Summarization Branches Out, (Barcelona, Spain, 2004).",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Automatic evaluation of summaries using n-gram cooccurrence statistics",
"authors": [
{
"first": "C",
"middle": [
"Y"
],
"last": "Lin",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Hovy",
"suffix": ""
}
],
"year": 2003,
"venue": "Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics, HLT-NAACL",
"volume": "",
"issue": "",
"pages": "71--78",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lin, C.Y. and Hovy, E. (2003, May-June). Automatic evaluation of summaries using n-gram co- occurrence statistics. In Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computa- tional Linguistics, HLT-NAACL, (Edmonton, Canada, 2003), pages 71-78.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Concept discovery from text",
"authors": [
{
"first": "D",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Pantel",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the 19th International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1--7",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lin, D. and Pantel, P. (2002, August 24-September 1). Concept discovery from text. In Proceedings of the 19th International Conference on Computational Linguistics, (Taipei, Taiwan, 2002), pages 1-7.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Automatic Summarization",
"authors": [
{
"first": "I",
"middle": [],
"last": "Mani",
"suffix": ""
}
],
"year": 2001,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mani, I. (2001). Automatic Summarization. Mitkov, R. (Ed.) John Benjamins B.V., Amsterdam, The Netherlands.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "National Science Education Standards",
"authors": [],
"year": 1996,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "National Research Council. (1996). National Science Education Standards. National Academy Press, Washington, DC.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Learning domain ontologies from document warehouses and dedicated websites",
"authors": [
{
"first": "R",
"middle": [],
"last": "Navigli",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Velardi",
"suffix": ""
}
],
"year": 2004,
"venue": "Computational Linguistics",
"volume": "30",
"issue": "2",
"pages": "151--179",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Navigli, R. and Velardi, P. (2004). Learning domain ontologies from document warehouses and dedi- cated websites. Computational Linguistics, 30 (2), pages 151-179.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Learning how to learn",
"authors": [
{
"first": "J",
"middle": [
"D"
],
"last": "Novak",
"suffix": ""
},
{
"first": "D",
"middle": [
"B"
],
"last": "Gowin",
"suffix": ""
}
],
"year": 1984,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Novak, J.D. and Gowin, D.B. (1984). Learning how to learn. Cambridge University Press, New York, New York.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Knowledge maps as scaffolds for cognitive processing",
"authors": [
{
"first": "A",
"middle": [
"M"
],
"last": "O'donnell",
"suffix": ""
},
{
"first": "D",
"middle": [
"F"
],
"last": "Dansereau",
"suffix": ""
},
{
"first": "R",
"middle": [
"H"
],
"last": "Hall",
"suffix": ""
}
],
"year": 2002,
"venue": "Educational Psychology Review",
"volume": "14",
"issue": "1",
"pages": "71--86",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "O'Donnell, A.M., Dansereau, D.F. and Hall, R.H. (2002). Knowledge maps as scaffolds for cognitive processing. Educational Psychology Review, 14 (1), pages 71-86.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Benchmarks for science literacy",
"authors": [],
"year": 1993,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Project 2061. (1993). Benchmarks for science liter- acy. Oxford University Press, New York, New York, United States.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Centroid-based summarization of multiple documents: sentence extraction, utility-based evaluation, and user studies",
"authors": [
{
"first": "D",
"middle": [
"R"
],
"last": "Radev",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Jing",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Budzikowska",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of the ANLP/NAACL 2000 Workshop on Summarization",
"volume": "",
"issue": "",
"pages": "21--30",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Radev, D.R., Jing, H. and Budzikowska, M. (2000). Centroid-based summarization of multiple docu- ments: sentence extraction, utility-based evalua- tion, and user studies. In Proceedings of the ANLP/NAACL 2000 Workshop on Summariza- tion, (2000), pages 21-30.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Term-weighting approaches in automatic text retrieval",
"authors": [
{
"first": "G",
"middle": [],
"last": "Salton",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Buckley",
"suffix": ""
}
],
"year": 1988,
"venue": "Information Processing and Management",
"volume": "24",
"issue": "5",
"pages": "513--523",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Salton, G. and Buckley, C. (1988). Term-weighting approaches in automatic text retrieval. Information Processing and Management, 24 (5), pages 513- 523.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Improved affinity graph based multi-document summarization",
"authors": [
{
"first": "X",
"middle": [],
"last": "Wan",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Yang",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "181--184",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wan, X. and Yang, J. (2006, June 5th-7th). Improved affinity graph based multi-document summariza- tion. In Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Lin- guistics, (New York City, New York, 2006), pages 181-184.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Learning a domain ontology in the Knowledge Puzzle project",
"authors": [
{
"first": "A",
"middle": [],
"last": "Zouaq",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Nkambou",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Frasson",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the Fifth International Workshop on Ontologies and Semantic Web for E-Learning",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zouaq, A., Nkambou, R. and Frasson, C. (2007, July 9-13). Learning a domain ontology in the Knowl- edge Puzzle project. In Proceedings of the Fifth In- ternational Workshop on Ontologies and Semantic Web for E-Learning, (Marina del Rey, California, 2007).",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"text": "Fragment of domain knowledge map created by domain and instructional experts",
"type_str": "figure",
"uris": null
},
"FIGREF1": {
"num": null,
"text": "COGENT ROUGE-L results at different word compression rates",
"type_str": "figure",
"uris": null
},
"TABREF0": {
"html": null,
"text": "Hypertext feature heading bonusWithin a given heading level, the hypertext feature assigns a higher score to sentences that appear earlier within that level based on both relative paragraph order within the heading and relative sentence position within each paragraph.",
"type_str": "table",
"num": null,
"content": "<table><tr><td colspan=\"19\">The equation used to compute the hypertext</td></tr><tr><td colspan=\"7\">score for a sentence is</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>hypertext</td><td>_</td><td>score</td><td>=</td><td>heading</td><td>_</td><td>bonus</td><td>*</td><td>4</td><td>1</td><td>par</td><td>_</td><td>no</td><td>*</td><td>4</td><td>1</td><td>sent</td><td>_</td><td>no</td></tr></table>"
}
}
}
}