|
{ |
|
"paper_id": "C16-1023", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:04:12.238637Z" |
|
}, |
|
"title": "Abstractive News Summarization based on Event Semantic Link Network", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Lab of Intelligent Information Processing", |
|
"institution": "University of Chinese Academy of Sciences", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Aston University", |
|
"location": { |
|
"settlement": "Birmingham", |
|
"country": "UK" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhuge", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Lab of Intelligent Information Processing", |
|
"institution": "University of Chinese Academy of Sciences", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Keenan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper studies the abstractive multi-document summarization for event-oriented news texts through event information extraction and abstract representation. Fine-grained event mentions and semantic relations between them are extracted to build a unified and connected event semantic link network, an abstract representation of source texts. A network reduction algorithm is proposed to summarize the most salient and coherent event information. New sentences with good linguistic quality are automatically generated and selected through sentences over-generation and greedy-selection processes. Experimental results on DUC 2006 and DUC 2007 datasets show that our system significantly outperforms the state-of-the-art extractive and abstractive baselines under both pyramid and ROUGE evaluation metrics.", |
|
"pdf_parse": { |
|
"paper_id": "C16-1023", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper studies the abstractive multi-document summarization for event-oriented news texts through event information extraction and abstract representation. Fine-grained event mentions and semantic relations between them are extracted to build a unified and connected event semantic link network, an abstract representation of source texts. A network reduction algorithm is proposed to summarize the most salient and coherent event information. New sentences with good linguistic quality are automatically generated and selected through sentences over-generation and greedy-selection processes. Experimental results on DUC 2006 and DUC 2007 datasets show that our system significantly outperforms the state-of-the-art extractive and abstractive baselines under both pyramid and ROUGE evaluation metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automatic summarization on news documents enables readers more easily to get general information of interesting news. Most of existing summarization methods have neglected the important event-oriented characteristics of news texts although some popular tasks such as DUC (Document Understanding Conference) and TAC (Text Analysis Conference) target at summarizing news documents. The examples below show that the core information of news texts is the atomic event mentions as shown in bolded words and their related concepts as shown in italic phrases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Lawyer Morris Dees, who is representing Victoria Keenan after she was attacked by two guards in July 1998, introduced depositions to contradict the men's testimony. \u2022 Morris S. Dees Jr., who was the co-founder of the Southern Poverty Law Center, defended for Keenan after she was assaulted by two security guards near the headquarters of the Aryan Nations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "An event usually tells us \"who did what to whom when and where . . . \" The most important components of an event include its actor (who, the agent of the event), action (what, the core meaning of the event) and receiver (whom, the target of the event action). Other arguments indicate other attributes of the event, such as time (when) and location (where). The event arguments are concepts related with the event action. For event-based news summarization, extracting the most salient events and related concepts are the core tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One of the most similar related work (Glava\u0161 and\u0160najder, 2014) investigated constructing event graph for multi-document summarization. The nodes in event graph denote event mentions while edges denote temporal relations between event mentions. It ranks event mentions based on the temporal relations and then generates summary by extracting sentences that contain salient event mentions. However, the problems of information overlapping and lacking of coherence cannot be overcomed by extractive methods. This paper explores the issue of abstractive summarization for event-oriented news texts. The semantic relations between events like cause-effect relation are also extracted to help generate more coherent and informative summary in our system. Figure 1 illustrates the procedure of our system. Firstly, the semantic information of texts is represented by constructing event semantic link network (Zhuge, 2012) . The semantic nodes of the network are events extracted from the source texts while semantic links are relations between events. Concept co-reference resolution and event co-reference resolution are both conducted within and cross documents to aggregate information from different places. Secondly, the event semantic link network is reduced to obtain connected and condensed summary network. A network reduction algorithm that makes use of the semantic links between event nodes is proposed to trade off among selecting salient information, maintaining coherence, and conveying correct and complete information. Finally, coherent and concise summary is automatically generated based on the summary network through sentences over-generation and greedy selection processes. The contributions of this work include:", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 62, |
|
"text": "(Glava\u0161 and\u0160najder, 2014)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 901, |
|
"end": 914, |
|
"text": "(Zhuge, 2012)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 749, |
|
"end": 757, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 The abstractive summarization for event-oriented news texts is made by extracting fine-grained events and constructing event semantic link network as the abstract representation of source texts. \u2022 An ILP-based network reduction algorithm using semantic links between events is proposed to obtain the most condensed, salient and coherent semantic information of source texts. \u2022 Informative and concise summary is automatically generated based on the event semantic link network after reduction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As shown in Figure 1 , the first procedure of our system is to extract events and construct event semantic link network (ESLN). Within ESLN, semantic nodes are event mentions consisting of event actions and arguments. The action indicates the central meaning of an event, while the arguments render the attributes of an event (Ahn, 2006) . In this work, each event is represented as a tree with the event action as its root node. The children of the root node are event arguments, including actor, receiver, time and location. The collapsed form of an event tree can be denoted as e=Action (Actor, Receiver, TimeArg, LocArg). We use semantic relations between events as semantic links (subsection 2.3). ESLN provides an event-based abstract representation for news documents, which is a directed and connected graph. The ESLN is constructed by: (1) extracting concepts from documents; (2) identifying event actions and extracting event arguments; (3) predicting the semantic links between event mentions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 337, |
|
"text": "(Ahn, 2006)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Event Semantic Link Network Construction", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "All noun phrases extracted from documents are defined as concepts. To enrich the semantics of a concept, we model it as an object which consists of its core noun phrase and attributes. The attributes of a concept reflect the relationships between this concept and other concepts. A concept a implied by its core noun phrase is denoted by a(", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concept Extraction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "r 1 \u2212 \u2192 c 1 , r 2 \u2212 \u2192 c 2 , \u2022 \u2022 \u2022 , rn \u2212 \u2192 c n )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concept Extraction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where c i indicates another concept and r i indicates a specific relation between concept a and concept c i . Concept c i is defined as an attribute of concept a.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concept Extraction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "word, lemma of the token and its surrounding tokens (five tokens to the left and right) POS-tag features part-of-speech tags of the token and its surrounding tokens (five to the left and right) Syntactic features the set of dependency relations of the token Modifier features modal modifiers, auxiliary verbs and negations. Word vectors 100-dimensional GloVe word vector (Pennington et al., 2014) Table 1: The features for the event identification model Position features the set of features that measure the distance between event actions (number of tokens) and their relative position (same sentence, adjacent sentences, adjacent event mentions) Lexical features word, lemma, stem, and pos-tag of both event actions as well as features indicating whether the word forms are the same, the semantic similarity between actions words, the word and lemma of each token between the action words Syntactic features syntactic path between the actions (dependency labels on the syntactic path between the actions), features indicating whether one action syntactically dominates the other, features indicating whether one is a predicate of an adverbial clause governed by the other event, and the set of dependency relations of both actions Modifier features the set of features that describe the modal, auxiliary, negation, and determination modifiers of both event actions Word vectors 100-dimensional GloVe word vector of both event action words Discourse features the discourse relations between event mentions. We use the document-level discourse analysis method (Joty et al., 2013) to extract the discourse relations between event mentions. We extract concepts and their attributes based on dependency trees. Texts are preprocessed by Stanford CoreNLP pipeline (Manning et al., 2014) . The dependency trees are transformed into semantic graph by pronoun resolution (Schuster et al., 2015) . All named entities are identified as concepts. 
For other nouns, we expand on \"compound\", \"name\", \"amod\", \"neg\", \"nummod\" and \"dep\" dependency edges to build the basic noun-phrase concept. We also expand on \"appos\", \"acl\", \"acl:relcl\", \"nmod:of\" and \"nmod:poss\" edges for non-proper nouns, since these are relative clauses that convey important information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1560, |
|
"end": 1579, |
|
"text": "(Joty et al., 2013)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1759, |
|
"end": 1781, |
|
"text": "(Manning et al., 2014)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1863, |
|
"end": 1886, |
|
"text": "(Schuster et al., 2015)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 397, |
|
"end": 453, |
|
"text": "Table 1: The features for the event identification model", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Lexical features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To extract the attributes of a concept, we extract the relations between the concept and other related concepts. In order to differentiate with event actions, the valid syntactic patterns of relations between the head concept and its attributes is restricted as \"be\", \"be-NP-prep\" and \"be-AP-prep\" where NP indicates noun phrase and AP indicates adjective phrase, such as \"Morris Dees is a lawyer\" and \"Morris Dees is the co-founder of Southern Poverty Law Center\". Several syntactic rules, which use the dependency labels (including \"nsubj\", \"appos\", \"nmod:of\" and \"nmod:poss\") between head tokens of concepts, are designed to detect those specific relations between concepts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To aggregate information across documents, we need to recognize all concept co-references across documents. The co-reference resolution within single document has been conducted during the preprocessing stage by Stanford CoreNLP pipeline, so those resolution rules can be adopted. We formulate the co-references detection in a hierarchical agglomerative clustering framework similar to (Shen et al., 2013) . A set of clusters are obtained and each cluster contains mentions refer to the same concept in the documents. For each cluster of co-referential concepts, we only reserve the most representative one and merge the attributes of all other mentions. For example, the concept \"Morris Dees\" in Figure 1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 386, |
|
"end": 405, |
|
"text": "(Shen et al., 2013)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The procedure of event identification consists of two steps: event action identification and event arguments extraction. The first step is formulated as a supervised classification task with features as shown in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 212, |
|
"end": 219, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Event Identification", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The arguments of an event are concepts related to the event action. Since we have extracted all concepts from documents, the argument extraction is to judge the argument type of each concept. We define in total fifteen dependency patterns using Semgrex expressions (Chambers et al., 2007) . These patterns mainly capture the subject-predicate-object constructions, subject-predicate constructions, passive constructions, prepositional constructions and clausal constructions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 288, |
|
"text": "(Chambers et al., 2007)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Identification", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Since important events are usually mentioned many times in the documents. For example, in Figure 1 \"Victoria Keenan was attacked by two guards in July 1998\" and \"Keenan was assaulted by two security guards\" refer to the same event. To determine whether two event mentions are co-referential, both the event actions and event arguments are compared. We use WordNet-based similarity method (Pedersen et al., 2004) to judge the semantic similarity between event actions. Two event mentions are identical only when the similarity between event actions is above a threshold (set as 0.8 after tuning) and corresponding event arguments are identical or co-referential. For all identical event mentions, we just reserve the most representative one and merge the relations and arguments of other mentions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 388, |
|
"end": 411, |
|
"text": "(Pedersen et al., 2004)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Identification", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We leverage the sentence structures and discourse features in documents to infer the relations between events in order to construct an informative event semantic link network. Through analyzing large numbers of news texts, we find following types of semantic relations between events are very common:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Relation Prediction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Temporal link. It indicates the temporal relations between two events, which consists of directed asymmetric links (BEFORE and AFTER) and symmetric links (OVERLAP). For symmetric links, we add two directed links in opposite directions between two event nodes; \u2022 Cause-effect link, denoted by ce as in e ce \u2212 \u2192 e , for which the predecessor event e is a cause of its successor event e and the successor event e is an effect of its predecessor event e.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Relation Prediction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Purpose link, denoted by pur as in e pur \u2212 \u2212 \u2192 e , for which the successor event e is the purpose of its predecessor event e. Event e is to be realized through event e.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Relation Prediction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Means link, denoted by mea as in e mea \u2212\u2212\u2192 e , for which the event e is a method or instrument which tends to make realization of event e more likely.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Relation Prediction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Condition link, denoted by con as in e con \u2212 \u2212 \u2192 e , for which the predecessor event e is a condition of its successor event e . Realization of e depends on realization of event e.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Relation Prediction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Sequential link, denoted by seq as in e seq \u2212 \u2212 \u2192 e , for which the event e is a successor of event e. It usually describes a number of event actions with succession relationships.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Relation Prediction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 Attribution link, denoted by attri as in e attri \u2212 \u2212\u2212 \u2192 e , for which event e is an attribution of event e, indicating its specific contents.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Relation Prediction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "For predicting the semantic links between each pair of event nodes, we use an L2-regularized maximum entropy classifier with features as shown in Table 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 153, |
|
"text": "Table 2", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Event Relation Prediction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In order to make the event semantic link network denser and more informative, we add \"Common Argument\" links between event nodes that share the same concept as argument. For example, \"Morris Dees defended for Keenan\" and \"Morris Dees contradicted the men's testimony\" both use concept \"Morris Dees\" as actor argument. After expanding the semantic links between events, we get a unified, connected and informative ESLN to represent the abstract information of source texts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Event Relation Prediction", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The constructed ESLN is an abstract representation of source documents. We summarize the documents by summarizing the network and generate summary based on the reduced network. For event-based summarization, the summary network must contain the most salient events and concepts information. We model the summarization of ESLN as a structured prediction problem (Collins, 2002) that trades off among selecting salient information, maintaining coherence, and conveying correct and complete information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 376, |
|
"text": "(Collins, 2002)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summarization", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Let E and C denote all the event nodes and concepts in ESLN, where each node e \u2208 E represents a unique event and each concept c \u2208 C is an argument of an event. To obtain the most salient and condensed summary network, we seek to maximize the summation of saliency scores of the selected events and concepts. For summary network which contains event set E and concept set C , its saliency score is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summarization", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e\u2208E \u03b8 T f (e) + c\u2208C \u03c8 T g(c)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Summarization", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where f (e) and g(c) represent the features of event e and concept c respectively (described in Table 3 ). \u03b8 and \u03c8 are vectors of feature weights for events and concepts respectively. The network reduction problem is decoded as an integer linear programming (ILP) by incorporating some priori knowledge as constraints ( \u00a73.1). Features weights are estimated by using structured pre-", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 103, |
|
"text": "Table 3", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Summarization", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Concept Type binary feature indicates whether it's named entity and whether it appears in the topic description Concept Freq.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "one binary feature for each frequency threshold t=0/1/2/5/10 Concept Pos.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "average and foremost position of sentences containing the concept (binarized by 5 thresholds) Concept Head word, lemma, pos, depth in the dependency tree (binarized by 5 thresholds) and whether it appears in the topic description Concept Span average and longest word span of concept (binarized by 5 thresholds)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Action Word word, lemma, pos, depth in the dependency tree (binarized by 5 thresholds) and whether it appears in the topic description Action Freq.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "binary feature for each frequency threshold t=0/1/2/5/10, average and foremost position of sentences containing the concept Actor Arg.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "all concept features of actor argument Receiver Arg.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "all concept features of receiver argument. If don't contain receiver argument, all set as 0 Time Argument one binary feature indicates whether it contains time argument Location Arg.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "one binary feature indicates whether it contains location argument Semantic Links total number of links from and to the event node in event graph (binarized by 5 thresholds)) diction algorithm ( \u00a73.2). After obtaining the summary network, concise and coherent summary can be generated through sentences over-generation and greedy selection ( \u00a73.3).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Let M and N be the total number of event nodes and concepts in source ESLN. We use e i and c j to represent the i-th event and j-th concept respectively. Let u i and v j be binary variables. u i is set to 1 iff event e i is selected and v j is set to 1 iff concept c j is selected.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The ILP maximization objective can be transformed into Equation 2, which contains two parts: the first part tends to select more important events; and the second part tends to select more concepts to increase information diversity and reduce redundancy in the summary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "M i=1 ui\u03b8 T f (ei) + N j=1 vj\u03c8 T g(cj)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To ensure the summary network could generate coherent summary and convey complete and correct information, the following groups of constraints are required:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Complete facts. To guarantee the selected event node convey complete fact, the following constraints are introduced:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2200i, if cj \u2208 Arguments(ei), vj \u2265 ui (3) \u2200j, i\u2208c j .relatedEvents ui + k\u2208c j .attributes v k \u2265 vj (4) \u2200i, k, if ei Attribution \u2212\u2212\u2212\u2212\u2212\u2212\u2212\u2192 e k , ui \u2264 u k", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Equation 3 ensures that if an event was selected, the arguments of the event should all be selected. Equation 4 guarantees that if a concept was selected, at least one event that it related to or an attribute that it has should be selected. These two constraints ensure the selected event or concept convey complete information. If event e k is an attribution of event e i , then e k describes specific contents of event e i . Equation 5 guarantees that if event e i is selected, its attribution e k must be selected.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Coherence. In order to generate coherent summary, the reduced summary network should be connected. Flow-based constraints have previously been used (Thadani and McKeown, 2013; Liu et al., 2015) to ensure the connectivity of subgraph. For each pair of event nodes e i and e k , the binary variable l i,k indicates the semantic link between them. Only if both e i and e k are selected and there is a link between them, l i,k can be set to 1, otherwise 0, which can be formulated as following:", |
|
"cite_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 175, |
|
"text": "(Thadani and McKeown, 2013;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 193, |
|
"text": "Liu et al., 2015)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2200i, k, l i,k \u2264 ei, l i,k \u2264 e k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "if there is no link from ei to e k , l i,k = 0", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "A set of single-commodity flow variables f i,k that each takes a non-negative integral value and represents the flow from event node e i to e k , were used to enforce the connectivity of summary network. We set a dummy \"ROOT\" node which is connected with only one selected event node in the ESLN (Equation 7), denoted as e 0 . The root node sends up to M units of flows to the selected event nodes (Equation 8). Each selected node consumes one unit of flow (Equation 9). Flow can only be sent over a link if and only if the link variable l is 1 (Equation 10).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Network Reduction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "For concept a( r 1 \u2212 \u2212 \u2192 c1, r 2 \u2212 \u2212 \u2192 c2, \u2022 \u2022 \u2022 , rn \u2212 \u2212 \u2192 cn)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CONCEPT DESCRIPTION RULES", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ", the description of concept a can be:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CONCEPT DESCRIPTION RULES", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1.Appositive modifier \"a, c1, c2. . . \", e.g. \"Morris Dees, civil rights lawyer, co-founder of Souther Poverty Low Center, . . . \" 2.Attributive clause \"a who/which/that r1c1 . . . \", e.g. \"Morris Dees who was the co-founder of Southern Poverty Law Center and a civil rights lawyer . . . \"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CONCEPT DESCRIPTION RULES", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "3.Appositive modifier mixed with attributive clause, e.g. \"Civil rights lawyer Morris Dees who was the co-founder of Southern Poverty Law Center . . . \" if e1 pur \u2212 \u2212\u2212 \u2192 e2,then generate \"e1 in order to e2\" and \"e1 so that e2\"; if e1 mea \u2212 \u2212\u2212 \u2192 e2, then generate \"e1 by e2\" and \"e1 by the way that e2\" if e1 attri \u2212 \u2212\u2212\u2212 \u2192 e2, then generate \"e1 e2\", \"e1 about/on/in/with/at e2\" and \"e1 that e2\"; if e1 seq \u2212 \u2212 \u2192 e2, then generate \"e1, e2\", \"e1 and e2\"; Table 4 : The set of concept description and sentence structure rules.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 452, |
|
"end": 459, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "CONCEPT DESCRIPTION RULES", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2200i \u2265 1, l0,i \u2264 ui;, M i=1 l0,i = 1 (7) M i=1 f0,i \u2212 M i=1 ui = 0 (8) \u2200k \u2265 1, i f i,k \u2212 p f k,p \u2212 u k = 0 (9) \u2200i \u2265 0, k \u2265 1, M \u2022 l i,k \u2212 f i,k \u2265 0", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "SENTENCE STRUCTURING RULES", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Length Constraint. To control the summary compression rate, the total number of selected events is limited less than L:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SENTENCE STRUCTURING RULES", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "M i=1 ui \u2264 L (11)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SENTENCE STRUCTURING RULES", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where parameter L is set to control the graph size after reduction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SENTENCE STRUCTURING RULES", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We learn feature weights \u03b8 and \u03c8 by training on a set of source ESLN paired with gold summary network. The source ESLN is constructed from source texts whereas the gold summary network is constructed from reference summaries and then mapped to the source ESLN by texts similarity method (Pilehvar et al., 2013). We formulate our estimation problem as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Weights Estimation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2212score(G * ) + maxG(score(G) + cost(G; G * ))", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Feature Weights Estimation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where G * denotes the gold summary network. score() is defined in Equation 1. cost(G; G * ) penalizes each event or concept in G but not in G * , which can be easily incorporated into the linear objective in Equation 2. We optimize our objective using AdaGrad (Duchi et al., 2011) with l 2 regularization (\u03bb = 0.01), with an initial step size 0.1. The ILP model is solved using Gurobi 6.5.2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 260, |
|
"end": 280, |
|
"text": "(Duchi et al., 2011)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Weights Estimation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Since each event node is structured as e=Action (Actor, Receiver, TimeArg, LocArg), we can generate complete sentence efficiently for it using SimpleNLG (Gatt and Reiter, 2009) . However, through experiments we find that low linguistic quality is the biggest problem with the generated sentences, which include syntax error, monotone sentence structure and repetition of the same noun phrases. To improve the linguistic quality of summary, we first over-generate large numbers of summary sentences and then use a greedy algorithm to select sentences with the best linguistic quality and no information overlapping. Sentence over-generation. To generate a complete and informative sentence, both the description of concepts and the organization of sentence structures need to be settled in following ways:", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 176, |
|
"text": "(Gatt and Reiter, 2009)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary Generation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 Each concept with several attributes can be described in different ways using concept description rules in Table 4 . \u2022 For each event node, we use SimpleNLG (Gatt and Reiter, 2009) tool to generate several different sentences, among which the description of concepts or the orders of arguments are different. Table 6 : The performance of concepts extraction, events identification and event relations prediction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 182, |
|
"text": "(Gatt and Reiter, 2009)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 116, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 318, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Summary Generation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 If two events share semantic links with each other, we merge them to generate one unified sentence by using corresponding sentence structuring rules in Table 4 . Note that, when two events share the same actor concept, only one is reserved. \u2022 For any two events that share the same actor, we merge them to generate one sentence using conjunction word \"and\" to connect event actions and arguments. Only one actor is kept as the subject.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 161, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Summary Generation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Greedy selection. After the above step, we get large numbers of candidate summary sentences. Some of them would have information overlapping with each other if generated from the same event node. To improve the linguistic quality of summary, we iteratively select a sentence with the highest linguistic quality and delete sentences that have information overlapping with it from the candidate sentences set. The linguistic quality of sentence s = {w 1 , w 2 , . . . , w L } is defined similarly as (Banerjee et al., 2015) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 498, |
|
"end": 521, |
|
"text": "(Banerjee et al., 2015)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary Generation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "LQ(s) = 1 1 \u2212 log2 L t=1 P (wt|wt\u22121wt\u22122) L (13)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary Generation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where L is the total number of words in sentence s; w 0 and w \u22121 both represent the beginning of sentence s. The 3-gram model P (w t |w t\u22121 , w t\u22122 ) is trained on the English Gigaword corpus (http://www. keithv.com/software/giga/). The coherence constraints guarantee the selected summary network to be connected and have a flow from the ROOT node to selected nodes. The selected sentences are ordered based on the direction of flows to obtain a coherent summary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary Generation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "To evaluate the performance of our system, we use two datasets that have been widely used in multidocument summarization shared tasks: DUC 2006 and DUC 2007. Each task has a gold standard dataset consisting of document clusters and reference summaries. DUC 2007 was manually annotated by using annotation tool brat (http://brat.nlplab.org) to extract gold events and gold relations between events, which are used for training the event identification model and event relations prediction model. Table 5 shows the details of the annotated dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 495, |
|
"end": 502, |
|
"text": "Table 5", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset and Experimental Settings", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The annotated dataset was split into training set (25 topics), development set (5 topics) and test set (15 topics). After training and tuning, the performance of our system is evaluated on the test set as shown in Table 6 . An event mention is correctly extracted only if both the event action and event arguments are correct. Table 6 only shows the recall of concept extraction, because we extract all kinds of concepts, whereas only event arguments are annotated in the annotated dataset. The feature weights \u03b8 and \u03c8 of event nodes and concepts are also estimated on the training set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 221, |
|
"text": "Table 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 334, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset and Experimental Settings", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To evaluate the performance of our summarization model, we use both ROUGE (Lin and Hovy, 2003) and Pyramid (Nenkova and Passonneau, 2004) evaluation metrics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 94, |
|
"text": "(Lin and Hovy, 2003)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 107, |
|
"end": 137, |
|
"text": "(Nenkova and Passonneau, 2004)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Experimental Settings", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "ROUGE-1.5.5 toolkit was used to evaluate the quality of summary on DUC 2006 and DUC 2007 (test set) dataset. We differentiate the different components of our system by including and not including the coherence constraints in ILP-based network reduction algorithm and using the manually annotated gold ESLN in our system. Our systems are compared with several baselines: Centroid (Radev et al., 2000) LexRank (Erkan and Radev, 2004) . The performance of NIST baseline and the average ROUGE scores of all the participating systems (i.e. AveDUC) both for DUC 2006 and DUC 2007 main tasks are also listed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 379, |
|
"end": 399, |
|
"text": "(Radev et al., 2000)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 408, |
|
"end": 431, |
|
"text": "(Erkan and Radev, 2004)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results with ROUGE Evaluation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "According to the results in Table 7 , our systems significantly outperform (paired t-test with p<0.05) all the baselines, which demonstrates that extracting event information from texts and summarizing based on structured information is much more effective than summarizing on sentence level.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 35, |
|
"text": "Table 7", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results with ROUGE Evaluation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In addition, we also compare our system (ESLN with coherence) with several state-of-the-art summarization methods: graph-based extractive method MultiMR (Wan and Xiao, 2009) , sparse-coding-based compressive method RA-MDS (Li et al., 2015) , and two most recently developed abstractive methods ILPSumm (Banerjee et al., 2015) and PSM . The results show that our system significantly (paired t-test with p<0.05) outperforms all the other four systems.", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 173, |
|
"text": "(Wan and Xiao, 2009)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 239, |
|
"text": "(Li et al., 2015)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 325, |
|
"text": "(Banerjee et al., 2015)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results with ROUGE Evaluation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The results also show that our system with coherence constraints achieves better performance than the counterpart without coherence constraints. So the coherence constraints are very helpful to select more salient and coherent information. Just as expected, the system using gold ESLN achieves the best performance. Incorrect dependency parsing and co-reference resolution will reduce the accuracy of extracting event information. On the other hand, it also verifies that the method that summarize texts based on accurate event information is effective.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results with ROUGE Evaluation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Since ROUGE metric evaluates summaries by strict string matching, we also use the pyramid evaluation metric which can measure the summary quality beyond simply string matching. It involves semantic matching of summary content units (SCUs) so as to recognize alternate realizations of the same meaning, which provides a better metric for abstractive summary evaluation. We employ the automated version of pyramid scoring (set threshold value to 0.6) in (Passonneau et al., 2013) . Table 7 shows the evaluation results of our system and two abstractive baselines on both DUC 2006 and DUC 2007(test set) . The results show that our system significantly (p<0.05) outperform the two baselines on both datasets, which demonstrates that our system can generate more informative summary. Table 8 shows a comparison of summaries generated by our system and human on DUC 2007 dataset (D0701A). The results show that our summary behaves similarly to human summary in following aspects: (1) Aggregating information from different places. For example, the description of \"Morris Dees\" includes information from several different documents, which are extracted as attributes of concept \"Morris Dees\" in our system; (2) Organizing sentences coherently. The coherence constraints in ILP-based network reduction component ensure the selected event information to be coherent. (3) Clearly pronoun reference. The adjacent sentences with the same subject in the summary are post-edited by replacing subjects of successor sentences with appropriate pronouns. Even though we incorporate the sentences over-generation and greedy-selection components in our system, some sentences in the generated summaries also have few syntax errors. Most cases are because of non-accurate event extraction caused by incorrect dependency parsing or coreference resolution.", |
|
"cite_spans": [ |
|
{ |
|
"start": 452, |
|
"end": 477, |
|
"text": "(Passonneau et al., 2013)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 581, |
|
"text": "DUC 2006 and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 582, |
|
"end": 600, |
|
"text": "DUC 2007(test set)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 480, |
|
"end": 487, |
|
"text": "Table 7", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 780, |
|
"end": 787, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results with Pyramid Evaluation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Summary by Our System: Morris Dees who was a crusader against intolerance, Keenans' attorney, the chief trial counsel, executive director and co-founder of the Southern Poverty Law Center used lawsuit to fight hate groups. He kept track of hate crime. He put East Peoria leader and won significant civil judgment against White Aryan Resistance and Ku Klux Klan in touch in Chicago with David Ostendorf. He formed a broad-based coalition and won a series of civil rights suit against other racist group in a campaign on race issue. He got an unwarranted slap in the Media Watch column and introduced photograph in the same issue. Southern Poverty Law Center, montgomery-based used civil suit and previously recorded a 20-percent increase in hate group. It battled racial bias and used civil law. It tracked hate group and won major legal fight against other white supremacist group and Ku Klux Klan. The 1973 federal lawsuit had the practical effect. The practical effect provided equal service to black. . .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Human-written Summary: The Southern Poverty Law Center is a nonprofit research group based in Montgomery, Alabama that battles racial bias. It tracks US hate crimes and the spread of racist organizations. It covers right-wing extremists in its magazine Intelligence Report. Through its Teaching Tolerance program, it provides materials to teachers to promote interracial and intercultural understanding. It freely distributes booklets on combating hate to schools, mayors, police chiefs, and other interested groups and citizens. It advises city leaders faced with hate crimes. Morris Dees co-founded the SPLC in 1971 and is its chief trial counsel and executive director, following Julian Bond. Dees and the SPLC seek to destroy hate groups through multi-million dollar civil suits that go after assets of groups and their leaders. In six lawsuits based on hate crimes or civil rights abuses, they have never lost. They successfully sued the Ku Klux Klan and the related Invisible Empire Klan, United Klan of America and . . . Table 8 : Example summary of D0701A in DUC2007 dataset by our system and the gold human summary (Only several leading sentences are displayed).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1028, |
|
"end": 1035, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Abstractive Multi-document summarization. Previous researches have shown that human write summaries through sentence aggregation and fusion (Cheung and Penn, 2013) . Abstraction-based approaches that gather information across sentences boundaries have become more and more popular in recent years. Different abstractive summarization methods can be summarized into four technique routes: (1) sentence fusion based methods (Barzilay and McKeown, 2005; Filippova and Strube, 2008; Banerjee et al., 2015) first cluster sentences into several themes and then generate a new sentence for each cluster by fusing the common information of all sentences in the cluster; (2) information extraction based methods (Genest and Lapalme, 2011; Li, 2015) extract information units, such as Information Items or Basic Semantic Unit, as components for generating sentences; (3) summary revision based methods (Nenkova, 2008; Siddharthan et al., 2011) try to improve quality of summary by noun phrases rewriting and co-reference resolution; (4) pattern-based sentence generation methods (Wang and Cardie, 2013; Pighin et al., 2014; generate new sentences based on a set of sentence generation patterns learned from corpus or designed templates.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 163, |
|
"text": "(Cheung and Penn, 2013)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 450, |
|
"text": "(Barzilay and McKeown, 2005;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 451, |
|
"end": 478, |
|
"text": "Filippova and Strube, 2008;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 501, |
|
"text": "Banerjee et al., 2015)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 892, |
|
"end": 907, |
|
"text": "(Nenkova, 2008;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 908, |
|
"end": 933, |
|
"text": "Siddharthan et al., 2011)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 1069, |
|
"end": 1092, |
|
"text": "(Wang and Cardie, 2013;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 1093, |
|
"end": 1113, |
|
"text": "Pighin et al., 2014;", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Recently, some works studied the use of deep learning techniques for abstractive summarization tasks, which use sequence-to-sequence generation techniques on single document or sentence summarization (Rush et al., 2015; Chopra et al., 2016) . A multi-dimensional summarization methodology was proposed to transform the paradigm of traditional summarization research through multi-disciplinary fundamental exploration on semantics, dimension, knowledge, computing and cyber-physical society (Zhuge, 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 200, |
|
"end": 219, |
|
"text": "(Rush et al., 2015;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 220, |
|
"end": 240, |
|
"text": "Chopra et al., 2016)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 503, |
|
"text": "(Zhuge, 2016)", |
|
"ref_id": "BIBREF43" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Event extraction. Event extraction is a traditional task in Information Extraction, which aims to recognize event mentions and arguments of predefined types (such as the ACE tasks). The works on event extraction either divide the task into separate subtasks, such as event-trigger extraction and argument extraction (Liao and Grishman, 2010; Hong et al., 2011) or model it jointly Li and Ji, 2014) . These works mainly focus on predefined event and argument types. However, we focus on open-domain and more fine-grained event information extraction for multi-document summarization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 341, |
|
"text": "(Liao and Grishman, 2010;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 360, |
|
"text": "Hong et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 397, |
|
"text": "Li and Ji, 2014)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Abstract representations. With the development of Abstract Meaning Representation (AMR) (Banarescu et al., 2012), representing semantic information with graphs has been studied in such tasks as summarization (Liu et al., 2015) and event detection (Kai and Grishman, 2015). Although several techniques on parsing sentences to AMR (Flanigan et al., 2014; Wang et al., 2015) have been developed, the performance of AMR parsing is very limited at the present.", |
|
"cite_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 226, |
|
"text": "(Liu et al., 2015)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 329, |
|
"end": 352, |
|
"text": "(Flanigan et al., 2014;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 371, |
|
"text": "Wang et al., 2015)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The approach proposed in this paper generates summary based on event information extraction and abstract representation, which achieves good performance on both DUC 2006 and DUC 2007 datasets. It generates new sentences based on structured event information and organizes sentences coherently based on semantic links. The experiment results show that the summaries generated by our system are relatively informative, coherent and compact, which demonstrates that the semantic link network based abstract representation of source texts is effective in making abstractive summarization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The stages of event extraction", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Ahn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Workshop on Annotating and Reasoning about Time and Events", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Ahn. 2006. The stages of event extraction. In Proceedings of the Workshop on Annotating and Reasoning about Time and Events, pages 1-8. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Abstract meaning representation (amr) 1.0 specification", |
|
"authors": [ |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Banarescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Bonial", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shu", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madalina", |
|
"middle": [], |
|
"last": "Georgescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kira", |
|
"middle": [], |
|
"last": "Griffitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulf", |
|
"middle": [], |
|
"last": "Hermjakob", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1533--1544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laura Banarescu, Claire Bonial, Shu Cai, Madalina Georgescu, Kira Griffitt, Ulf Hermjakob, Kevin Knight, Philipp Koehn, Martha Palmer, and Nathan Schneider. 2012. Abstract meaning representation (amr) 1.0 specification. In EMNLP, pages 1533-1544.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Multi-document abstractive summarization using ilp based multi-sentence compression", |
|
"authors": [ |
|
{ |
|
"first": "Siddhartha", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prasenjit", |
|
"middle": [], |
|
"last": "Mitra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazunari", |
|
"middle": [], |
|
"last": "Sugiyama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1208--1214", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siddhartha Banerjee, Prasenjit Mitra, and Kazunari Sugiyama. 2015. Multi-document abstractive summarization using ilp based multi-sentence compression. In IJCAI, pages 1208-1214.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Sentence fusion for multidocument news summarization", |
|
"authors": [ |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kathleen R Mckeown", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Computational Linguistics", |
|
"volume": "31", |
|
"issue": "3", |
|
"pages": "297--328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Regina Barzilay and Kathleen R McKeown. 2005. Sentence fusion for multidocument news summarization. Computational Linguistics, 31(3):297-328.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Abstractive multidocument summarization via phrase selection and merging", |
|
"authors": [ |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piji", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Passonneau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1506.01597" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lidong Bing, Piji Li, Yi Liao, Wai Lam, Weiwei Guo, and Rebecca J Passonneau. 2015. Abstractive multi- document summarization via phrase selection and merging. arXiv preprint arXiv:1506.01597.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Learning alignments and leveraging natural logic", |
|
"authors": [ |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trond", |
|
"middle": [], |
|
"last": "Grenager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chloe", |
|
"middle": [], |
|
"last": "Kiddon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Maccartney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Catherine De", |
|
"middle": [], |
|
"last": "Marneffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ramage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Yeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "165--170", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nathanael Chambers, Daniel Cer, Trond Grenager, David Hall, Chloe Kiddon, Bill MacCartney, Marie-Catherine De Marneffe, Daniel Ramage, Eric Yeh, and Christopher D Manning. 2007. Learning alignments and lever- aging natural logic. In Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pages 165-170. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Towards robust abstractive multi-document summarization: A caseframe analysis of centrality and domain", |
|
"authors": [ |
|
{ |
|
"first": "Jackie", |
|
"middle": [ |
|
"Chi" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kit", |
|
"middle": [], |
|
"last": "Cheung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerald", |
|
"middle": [], |
|
"last": "Penn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ACL (1)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1233--1242", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jackie Chi Kit Cheung and Gerald Penn. 2013. Towards robust abstractive multi-document summarization: A caseframe analysis of centrality and domain. In ACL (1), pages 1233-1242.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Abstractive sentence summarization with attentive recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Harvard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sumit Chopra, Michael Auli, Alexander M Rush, and SEAS Harvard. 2016. Abstractive sentence summarization with attentive recurrent neural networks. NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Discriminative training methods for hidden markov models: Theory and experiments with perceptron algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Collins. 2002. Discriminative training methods for hidden markov models: Theory and experiments with perceptron algorithms. In EMNLP, pages 1-8. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Adaptive subgradient methods for online learning and stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Duchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elad", |
|
"middle": [], |
|
"last": "Hazan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoram", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2121--2159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Duchi, Elad Hazan, and Yoram Singer. 2011. Adaptive subgradient methods for online learning and stochas- tic optimization. Journal of Machine Learning Research, 12(Jul):2121-2159.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Lexrank: Graph-based lexical centrality as salience in text summarization", |
|
"authors": [ |
|
{ |
|
"first": "G\u00fcnes", |
|
"middle": [], |
|
"last": "Erkan", |
|
"suffix": "" |
|
}, |
|
{

"first": "Dragomir",

"middle": [

"R"

],

"last": "Radev",

"suffix": ""

}
|
], |
|
"year": 2004, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "457--479", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G\u00fcnes Erkan and Dragomir R Radev. 2004. Lexrank: Graph-based lexical centrality as salience in text summa- rization. Journal of Artificial Intelligence Research, pages 457-479.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Sentence fusion via dependency graph compression", |
|
"authors": [ |
|
{ |
|
"first": "Katja", |
|
"middle": [], |
|
"last": "Filippova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--185", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katja Filippova and Michael Strube. 2008. Sentence fusion via dependency graph compression. In EMNLP, pages 177-185. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A discriminative graph-based parser for the abstract meaning representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Flanigan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Flanigan, Sam Thomson, Jaime G Carbonell, Chris Dyer, and Noah A Smith. 2014. A discriminative graph-based parser for the abstract meaning representation.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Simplenlg: A realisation engine for practical applications", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Gatt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Reiter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 12th European Workshop on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "90--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert Gatt and Ehud Reiter. 2009. Simplenlg: A realisation engine for practical applications. In Proceedings of the 12th European Workshop on Natural Language Generation, pages 90-93. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Framework for abstractive summarization using text-to-text generation", |
|
"authors": [ |
|
{

"first": "Pierre-Etienne",

"middle": [],

"last": "Genest",

"suffix": ""

},
|
{ |
|
"first": "Guy", |
|
"middle": [], |
|
"last": "Lapalme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Workshop on Monolingual Text-To-Text Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "64--73", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pierre-Etienne Genest and Guy Lapalme. 2011. Framework for abstractive summarization using text-to-text generation. In Proceedings of the Workshop on Monolingual Text-To-Text Generation, pages 64-73. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Event graphs for information retrieval and multi-document summarization. Expert systems with applications", |
|
"authors": [ |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
}, |
|
{

"first": "Jan",

"middle": [],

"last": "\u0160najder",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "41", |
|
"issue": "", |
|
"pages": "6904--6916", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Goran Glava\u0161 and Jan \u0160najder. 2014. Event graphs for information retrieval and multi-document summarization. Expert systems with applications, 41(15):6904-6916.",
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Using cross-entity inference to improve event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Hong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianmin", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiaoming", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1127--1136", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Hong, Jianfeng Zhang, Bin Ma, Jianmin Yao, Guodong Zhou, and Qiaoming Zhu. 2011. Using cross-entity inference to improve event extraction. In ACL, pages 1127-1136. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Combining intra-and multisentential rhetorical parsing for document-level discourse analysis", |
|
"authors": [ |
|
{

"first": "Shafiq",

"middle": [

"R"

],

"last": "Joty",

"suffix": ""

},

{

"first": "Giuseppe",

"middle": [],

"last": "Carenini",

"suffix": ""

},

{

"first": "Raymond",

"middle": [

"T"

],

"last": "Ng",

"suffix": ""

},

{

"first": "Yashar",

"middle": [],

"last": "Mehdad",

"suffix": ""

}
|
], |
|
"year": 2013, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "486--496", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shafiq R Joty, Giuseppe Carenini, Raymond T Ng, and Yashar Mehdad. 2013. Combining intra-and multi- sentential rhetorical parsing for document-level discourse analysis. In ACL, pages 486-496.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Improving event detection with abstract meaning representation", |
|
"authors": [], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Li Thien Huu Nguyen Kai and Cao Ralph Grishman. 2015. Improving event detection with abstract meaning representation. ACL-IJCNLP 2015, page 11.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Incremental joint extraction of entity mentions and relations", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ACL (1)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "402--412", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi Li and Heng Ji. 2014. Incremental joint extraction of entity mentions and relations. In ACL (1), pages 402-412.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Joint event extraction via structured prediction with global features", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Heng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ACL (1)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "73--82", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi Li, Heng Ji, and Liang Huang. 2013. Joint event extraction via structured prediction with global features. In ACL (1), pages 73-82.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Reader-aware multi-document summarization via sparse coding", |
|
"authors": [ |
|
{ |
|
"first": "Piji", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1504.07324" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piji Li, Lidong Bing, Wai Lam, Hang Li, and Yi Liao. 2015. Reader-aware multi-document summarization via sparse coding. arXiv preprint arXiv:1504.07324.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Abstractive multi-document summarization with semantic information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1908--1913", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Li. 2015. Abstractive multi-document summarization with semantic information extraction. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1908-1913.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Using document level cross-event inference to improve event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Shasha", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [], |
|
"last": "Grishman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "789--797", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shasha Liao and Ralph Grishman. 2010. Using document level cross-event inference to improve event extraction. In ACL, pages 789-797. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Automatic evaluation of summaries using n-gram co-occurrence statistics", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "71--78", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin and Eduard Hovy. 2003. Automatic evaluation of summaries using n-gram co-occurrence statistics. In HLT-NAACL, pages 71-78. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Toward abstractive summarization using semantic representations", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Flanigan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Norman", |
|
"middle": [], |
|
"last": "Sadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Liu, Jeffrey Flanigan, Sam Thomson, Norman Sadeh, and Noah A Smith. 2015. Toward abstractive summa- rization using semantic representations.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "The stanford corenlp natural language processing toolkit", |
|
"authors": [ |
|
{

"first": "Christopher",

"middle": [

"D"

],

"last": "Manning",

"suffix": ""

},

{

"first": "Mihai",

"middle": [],

"last": "Surdeanu",

"suffix": ""

},

{

"first": "John",

"middle": [],

"last": "Bauer",

"suffix": ""

},

{

"first": "Jenny",

"middle": [

"Rose"

],

"last": "Finkel",

"suffix": ""

},

{

"first": "Steven",

"middle": [],

"last": "Bethard",

"suffix": ""

},

{

"first": "David",

"middle": [],

"last": "McClosky",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "ACL (System Demonstrations)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D Manning, Mihai Surdeanu, John Bauer, Jenny Rose Finkel, Steven Bethard, and David McClosky. 2014. The stanford corenlp natural language processing toolkit. In ACL (System Demonstrations), pages 55-60.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Evaluating content selection in summarization: The pyramid method", |
|
"authors": [ |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Passonneau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ani Nenkova and Rebecca Passonneau. 2004. Evaluating content selection in summarization: The pyramid method.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Entity-driven rewrite for multi-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ani Nenkova. 2008. Entity-driven rewrite for multi-document summarization.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Automated pyramid scoring of summaries using distributional semantics", |
|
"authors": [ |
|
{

"first": "Rebecca",

"middle": [

"J"

],

"last": "Passonneau",

"suffix": ""

},

{

"first": "Emily",

"middle": [],

"last": "Chen",

"suffix": ""

},

{

"first": "Weiwei",

"middle": [],

"last": "Guo",

"suffix": ""

},

{

"first": "Dolores",

"middle": [],

"last": "Perin",

"suffix": ""

}
|
], |
|
"year": 2013, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "143--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rebecca J Passonneau, Emily Chen, Weiwei Guo, and Dolores Perin. 2013. Automated pyramid scoring of summaries using distributional semantics. In ACL (2), pages 143-147.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Wordnet:: Similarity: measuring the relatedness of concepts", |
|
"authors": [ |
|
{ |
|
"first": "Ted", |
|
"middle": [], |
|
"last": "Pedersen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siddharth", |
|
"middle": [], |
|
"last": "Patwardhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Michelizzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Demonstration papers at HLT-NAACL 2004", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ted Pedersen, Siddharth Patwardhan, and Jason Michelizzi. 2004. Wordnet:: Similarity: measuring the related- ness of concepts. In Demonstration papers at HLT-NAACL 2004, pages 38-41. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Modelling events through memory-based, open-ie patterns for abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Daniele", |
|
"middle": [], |
|
"last": "Pighin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Cornolti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enrique", |
|
"middle": [], |
|
"last": "Alfonseca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katja", |
|
"middle": [], |
|
"last": "Filippova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ACL (1)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "892--901", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniele Pighin, Marco Cornolti, Enrique Alfonseca, and Katja Filippova. 2014. Modelling events through memory-based, open-ie patterns for abstractive summarization. In ACL (1), pages 892-901.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Align, disambiguate and walk: A unified approach for measuring semantic similarity", |
|
"authors": [ |
|
{

"first": "Mohammad",

"middle": [

"Taher"

],

"last": "Pilehvar",

"suffix": ""

},

{

"first": "David",

"middle": [],

"last": "Jurgens",

"suffix": ""

},

{

"first": "Roberto",

"middle": [],

"last": "Navigli",

"suffix": ""

}
|
], |
|
"year": 2013, |
|
"venue": "ACL (1)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1341--1351", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Taher Pilehvar, David Jurgens, and Roberto Navigli. 2013. Align, disambiguate and walk: A unified approach for measuring semantic similarity. In ACL (1), pages 1341-1351.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Centroid-based summarization of multiple documents: sentence extraction, utility-based evaluation, and user studies", |
|
"authors": [ |
|
{

"first": "Dragomir",

"middle": [

"R"

],

"last": "Radev",

"suffix": ""

},

{

"first": "Hongyan",

"middle": [],

"last": "Jing",

"suffix": ""

},

{

"first": "Malgorzata",

"middle": [],

"last": "Budzikowska",

"suffix": ""

}
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 2000 NAACL-ANLP Workshop on Automatic summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dragomir R Radev, Hongyan Jing, and Malgorzata Budzikowska. 2000. Centroid-based summarization of mul- tiple documents: sentence extraction, utility-based evaluation, and user studies. In Proceedings of the 2000 NAACL-ANLP Workshop on Automatic summarization, pages 21-30. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "A neural attention model for abstractive sentence summarization", |
|
"authors": [ |
|
{

"first": "Alexander",

"middle": [

"M"

],

"last": "Rush",

"suffix": ""

},

{

"first": "Sumit",

"middle": [],

"last": "Chopra",

"suffix": ""

},

{

"first": "Jason",

"middle": [],

"last": "Weston",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1509.00685" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander M Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sentence summarization. arXiv preprint arXiv:1509.00685.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Generating semantically precise scene graphs from textual descriptions for improved image retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ranjay", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angel", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Fourth Workshop on Vision and Language", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "70--80", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Schuster, Ranjay Krishna, Angel Chang, Li Fei-Fei, and Christopher D Manning. 2015. Generating semantically precise scene graphs from textual descriptions for improved image retrieval. In Proceedings of the Fourth Workshop on Vision and Language, pages 70-80.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "A participant-based approach for event summarization using twitter streams", |
|
"authors": [ |
|
{ |
|
"first": "Chao", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fuliang", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1152--1162", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chao Shen, Fei Liu, Fuliang Weng, and Tao Li. 2013. A participant-based approach for event summarization using twitter streams. In HLT-NAACL, pages 1152-1162.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Information status distinctions and referring expressions: An empirical study of references to people in news summaries", |
|
"authors": [ |
|
{ |
|
"first": "Advaith", |
|
"middle": [], |
|
"last": "Siddharthan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Computational Linguistics", |
|
"volume": "37", |
|
"issue": "4", |
|
"pages": "811--842", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Advaith Siddharthan, Ani Nenkova, and Kathleen McKeown. 2011. Information status distinctions and refer- ring expressions: An empirical study of references to people in news summaries. Computational Linguistics, 37(4):811-842.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Sentence compression with joint structural inference", |
|
"authors": [ |
|
{ |
|
"first": "Kapil", |
|
"middle": [], |
|
"last": "Thadani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--74", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kapil Thadani and Kathleen McKeown. 2013. Sentence compression with joint structural inference. In CoNLL, pages 65-74.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Graph-based multi-modality learning for topic-focused multi-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianguo", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1586--1591", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaojun Wan and Jianguo Xiao. 2009. Graph-based multi-modality learning for topic-focused multi-document summarization. In IJCAI, pages 1586-1591.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Domain-independent abstract generation for focused meeting summarization", |
|
"authors": [ |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ACL (1)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1395--1405", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lu Wang and Claire Cardie. 2013. Domain-independent abstract generation for focused meeting summarization. In ACL (1), pages 1395-1405.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "A transition-based algorithm for amr parsing", |
|
"authors": [ |
|
{ |
|
"first": "Chuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "366--375", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuan Wang, Nianwen Xue, Sameer Pradhan, and Sameer Pradhan. 2015. A transition-based algorithm for amr parsing. In HLT-NAACL, pages 366-375.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "The Knowledge Grid: Toward Cyber-Physical Society", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhuge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Zhuge. 2012. The Knowledge Grid: Toward Cyber-Physical Society. World Scientific.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Multi-dimensional summarization in cyber-physical society", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhuge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Zhuge. 2016. Multi-dimensional summarization in cyber-physical society. Morgan Kaufmann.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "An example illustrating the framework of our summarization system." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "if e1af ter/bef ore/overlap \u2212 \u2212\u2212\u2212\u2212\u2212\u2212\u2212\u2212\u2212\u2212\u2212\u2212\u2212\u2212\u2212 \u2192 e2, then generate \"e1 after/before/when e2\"; if e1 ce \u2212 \u2212 \u2192 e2,then generate \"Because e1, e2, \" and \"e2 because e1\";" |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"text": "The features for the event relation prediction model", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "Event and concept features (all binaries)", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "The average number of concepts, events, event relations and relations after expanding of each topic in annotated DUC2007 (including 45 topics, each topic has 25 documents).", |
|
"content": "<table><tr><td/><td>precision</td><td>recall</td><td>F1-score</td></tr><tr><td>Concept Extraction</td><td>N/A</td><td>0.7928</td><td>N/A</td></tr><tr><td>Event Action identification</td><td>0.8532</td><td>0.8468</td><td>0.8499</td></tr><tr><td>Event Mention extraction</td><td>0.7272</td><td>0.7067</td><td>0.7168</td></tr><tr><td>Event Relations prediction</td><td>0.5894</td><td>0.6222</td><td>0.6054</td></tr></table>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"text": "Comparison of ROUGE scores (F-score) and Pyramid scores onDUC 2006 and 2007(test set).", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |