|
{ |
|
"paper_id": "C14-1027", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:23:05.400656Z" |
|
}, |
|
"title": "Rapid Development of a Corpus with Discourse Annotations using Two-stage Crowdsourcing", |
|
"authors": [ |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Japan Science and Technology Agency \u00a7 Yahoo Japan Corporation", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yuichiro", |
|
"middle": [], |
|
"last": "Machida", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Japan Science and Technology Agency \u00a7 Yahoo Japan Corporation", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Tomohide", |
|
"middle": [], |
|
"last": "Shibata", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Japan Science and Technology Agency \u00a7 Yahoo Japan Corporation", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Japan Science and Technology Agency \u00a7 Yahoo Japan Corporation", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hayato", |
|
"middle": [], |
|
"last": "Kobayashi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Japan Science and Technology Agency \u00a7 Yahoo Japan Corporation", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Manabu", |
|
"middle": [], |
|
"last": "Sassano", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Japan Science and Technology Agency \u00a7 Yahoo Japan Corporation", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present a novel approach for rapidly developing a corpus with discourse annotations using crowdsourcing. Although discourse annotations typically require much time and cost owing to their complex nature, we realize discourse annotations in an extremely short time while retaining good quality of the annotations by crowdsourcing two annotation subtasks. In fact, our experiment to create a corpus comprising 30,000 Japanese sentences took less than eight hours to run. Based on this corpus, we also develop a supervised discourse parser and evaluate its performance to verify the usefulness of the acquired corpus.", |
|
"pdf_parse": { |
|
"paper_id": "C14-1027", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present a novel approach for rapidly developing a corpus with discourse annotations using crowdsourcing. Although discourse annotations typically require much time and cost owing to their complex nature, we realize discourse annotations in an extremely short time while retaining good quality of the annotations by crowdsourcing two annotation subtasks. In fact, our experiment to create a corpus comprising 30,000 Japanese sentences took less than eight hours to run. Based on this corpus, we also develop a supervised discourse parser and evaluate its performance to verify the usefulness of the acquired corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Humans understand text not by individually interpreting clauses or sentences, but by linking such a text fragment with another in a particular context. To allow computers to understand text, it is essential to capture the precise relations between these text fragments. This kind of analysis is called discourse parsing or discourse structure analysis, and is an important and fundamental task in natural language processing (NLP). Systems for discourse parsing are, however, available only for major languages, such as English, owing to the lack of corpora with discourse annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For English, several corpora with discourse annotations have been developed manually, consuming a great deal of time and cost in the process. These include the Penn Discourse Treebank (Prasad et al., 2008) , RST Discourse Treebank (Carlson et al., 2001) , and Discourse Graphbank (Wolf and Gibson, 2005) . Discourse parsers trained on these corpora have also been developed and practically used. To create the same resource-rich environment for another language, a quicker method than the conventional time-consuming framework should be sought. One possible approach is to use crowdsourcing, which has actively been used to produce various language resources in recent years (e.g., (Snow et al., 2008; Negri et al., 2011; Hong and Baker, 2011; Fossati et al., 2013) ). It is, however, difficult to crowdsource the difficult judgments for discourse annotations, which typically consists of two steps: finding a pair of spans with a certain relation and identifying the relation between the pair.", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 205, |
|
"text": "(Prasad et al., 2008)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 253, |
|
"text": "(Carlson et al., 2001)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 303, |
|
"text": "(Wolf and Gibson, 2005)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 682, |
|
"end": 701, |
|
"text": "(Snow et al., 2008;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 702, |
|
"end": 721, |
|
"text": "Negri et al., 2011;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 722, |
|
"end": 743, |
|
"text": "Hong and Baker, 2011;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 744, |
|
"end": 765, |
|
"text": "Fossati et al., 2013)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a method for crowdsourcing discourse annotations that simplifies the procedure by dividing it into two steps. The point is that by simplifying the annotation task it is suitable for crowdsourcing, but does not skew the annotations for use in practical discourse parsing. First, finding a discourse unit for the span is a costly process, and thus we adopt a clause as the discourse unit, since this is reliable enough to be automatically detected. We also limit the length of each target document to three sentences and at most five clauses to facilitate the annotation task. Secondly, we detect and annotate clause pairs in a document that hold logical discourse relations. However, since this is too complicated to assign as one task using crowdsourcing, we divide the task into two steps: determining the existence of logical discourse relations and annotating the type of relation. Our two-stage approach is a robust method in that it confirms the existence of the discourse relations twice. We also designed the tagset of discourse relations for crowdsourcing, which consists of two layers, where the upper layer contains the following three classes: \"CONTINGENCY,\" \"COMPARISON\" and \"OTHER.\" Although the task settings are simplified for crowdsourcing, the obtained corpus and knowledge of discourse parsing could be still useful in general discourse parsing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In our experiments, we crowdsourced discourse annotations for Japanese, for which there are no publicly available corpora with discourse annotations. The resulting corpus consists of 10,000 documents, each of which comprises three sentences extracted from the web. Carrying out this two-stage crowdsourcing task took less than eight hours. The time elapsed was significantly shorter than the conventional corpus building method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We also developed a discourse parser by exploiting the acquired corpus with discourse annotations. We learned a machine learning-based model for discourse parsing based on this corpus and evaluated its performance. An F1 value of 37.9% was achieved for contingency relations, which would be roughly comparable with state-of-the-art discourse parsers on English. This result indicates the usefulness of the acquired corpus. The resulting discourse parser would be effectively exploited in NLP applications, such as sentiment analysis (Zirn et al., 2011) and contradiction detection (Murakami et al., 2009; Ennals et al., 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 533, |
|
"end": 552, |
|
"text": "(Zirn et al., 2011)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 581, |
|
"end": 604, |
|
"text": "(Murakami et al., 2009;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 625, |
|
"text": "Ennals et al., 2010)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The novel contributions of this study are summarized below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a framework for developing a corpus with discourse annotations using two-stage crowdsourcing, which is both cheap and quick to execute, but still retains good quality of the annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We construct a Japanese discourse corpus in an extremely short time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We develop a discourse parser based on the acquired corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The remainder of this paper is organized as follows. Section 2 introduces related work, while Section 3 describes our proposed framework and reports the experimental results for the creation of a corpus with discourse annotations. Section 4 presents a method for discourse parsing based on the corpus as well as some experimental results. Section 5 concludes the paper. Snow et al. (2008) applied crowdsourcing to five NLP annotation tasks, but the settings of these tasks are very simple. There have also been several attempts to construct language resources with complex annotations using crowdsourcing. Negri et al. (2011) proposed a method for developing a cross-lingual textual entailment (CLTE) corpus using crowdsourcing. They tackled this complex data creation task by dividing it into several simple subtasks: sentence modification, type annotation and sentence translation. The creative CLTE task and subtasks are quite different from our non-creative task and subtasks of discourse annotations. Fossati et al. (2013) proposed FrameNet annotations using crowdsourcing. Their method is a single-step approach to only detect frame elements. They verified the usefulness of their approach through an experiment on a small set of verbs with only two frame ambiguities per verb. Although they seem to be running a larger-scale experiment, its result has not been revealed yet. Hong and Baker (2011) presented a crowdsourcing method for selecting FrameNet frames, which is a part of the FrameNet annotation process. Since their task is equivalent to word sense disambiguation, it is not very complex compared to the whole FrameNet annotation process. These FrameNet annotations are still different from discourse annotations, which are our target. To the best of our knowledge, there have been no attempts to crowdsource discourse annotations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 370, |
|
"end": 388, |
|
"text": "Snow et al. (2008)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 625, |
|
"text": "Negri et al. (2011)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1006, |
|
"end": 1027, |
|
"text": "Fossati et al. (2013)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There are several manually-crafted corpora with discourse annotation for English, such as the Penn Discourse Treebank (Prasad et al., 2008) , RST Discourse Treebank (Carlson et al., 2001) , and Discourse Graphbank (Wolf and Gibson, 2005) . These corpora were developed from English newspaper articles. Several attempts have been made to manually create corpora with discourse annotations for languages other than English. These include the Potsdam Commentary Corpus (Stede, 2004) for German (newspaper; 2,900 sentences), Rhetalho (Pardo et al., 2004) for Portuguese (scientific papers; 100 documents; 1,350 sentences), and the RST Spanish Treebank for Spanish (da Cunha et al., 2011) (several genres; 267 documents; 2,256 sentences). All of these consist of relatively small numbers of sentences compared with the English corpora containing several tens of thousands sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 139, |
|
"text": "(Prasad et al., 2008)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 165, |
|
"end": 187, |
|
"text": "(Carlson et al., 2001)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 237, |
|
"text": "(Wolf and Gibson, 2005)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 479, |
|
"text": "(Stede, 2004)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 530, |
|
"end": 550, |
|
"text": "(Pardo et al., 2004)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In recent years, there have been many studies on discourse parsing on the basis of the above handannotated corpora (e.g., Subba and Di Eugenio, 2009; Hernault et al., 2010; Ghosh et al., 2011; Lin et al., 2012; Feng and Hirst, 2012; Joty et al., 2012; Joty et al., 2013; Biran and McKeown, 2013; Lan et al., 2013) ). This surge of research on discourse parsing can be attributed to the existence of corpora with discourse annotations. However, the target language is mostly English since English is the only language that has large-scale discourse corpora. To develop and improve discourse parsers for languages other than English, it is necessary to build large-scale annotated corpora, especially in a short period if possible.", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 149, |
|
"text": "Subba and Di Eugenio, 2009;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 150, |
|
"end": 172, |
|
"text": "Hernault et al., 2010;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 192, |
|
"text": "Ghosh et al., 2011;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 210, |
|
"text": "Lin et al., 2012;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 232, |
|
"text": "Feng and Hirst, 2012;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 251, |
|
"text": "Joty et al., 2012;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 270, |
|
"text": "Joty et al., 2013;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 295, |
|
"text": "Biran and McKeown, 2013;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 313, |
|
"text": "Lan et al., 2013)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "3 Development of Corpus with Discourse Annotations using Crowdsourcing", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We develop a tagged corpus in which pairs of discourse units are annotated with discourse relations. To achieve this, it is necessary to determine target documents, discourse units, and a discourse relation tagset. The following subsections explain the details of these three aspects.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus Specifications", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In previous studies on constructing discourse corpora, the target documents were mainly newspaper texts, such as the Wall Street Journal for English. However, discourse parsers trained on such newspaper corpora usually have a problem of domain adaptation. That is to say, while discourse parsers trained on newspaper corpora are good at analyzing newspaper texts, they generally cannot perform well on texts of other domains.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Text and Discourse Unit", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "To address this problem, we set out to create an annotated corpus covering a variety of domains. Since the web contains many documents across a variety of domains, we use the Diverse Document Leads Corpus (Hangyo et al., 2012) , which was extracted from the web. Each document in this corpus consists of the first three sentences of a Japanese web page, making these short documents suitable for our discourse annotation method based on crowdsourcing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 226, |
|
"text": "(Hangyo et al., 2012)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Text and Discourse Unit", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "We adopt the clause as a discourse unit, since spans are too fine-grained to annotate using crowdsourcing and sentences are too coarse-grained to capture discourse relations. Clauses, which are automatically identified, do not need to be manually modified since they are thought to be reliable enough. Clause identification is performed using the rules of Shibata and Kurohashi (2005) . For example, the following rules are used to identify clauses as our discourse units:", |
|
"cite_spans": [ |
|
{ |
|
"start": 356, |
|
"end": 384, |
|
"text": "Shibata and Kurohashi (2005)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Text and Discourse Unit", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "\u2022 clauses that function as a relatively strong boundary in a sentence are adopted,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Text and Discourse Unit", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "\u2022 relative clauses are excluded.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Text and Discourse Unit", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "Since workers involved in our crowdsourcing task need to judge whether clause pairs have discourse relations, the load of these workers increases combinatorially as the number of clauses in a sentence increases. To alleviate this problem, we limit the number of clauses in a document to five. This limitation excludes only about 5% of the documents in the original corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Text and Discourse Unit", |
|
"sec_num": "3.1.1" |
|
}, |
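
{

"text": "As a concrete illustration of this combinatorial growth: a document with n clauses contains n(n-1)/2 candidate clause pairs, so a five-clause document yields at most 5*4/2 = 10 pairs to judge, whereas an eight-clause document would already yield 28. Capping documents at five clauses therefore bounds the per-document workload at ten pairwise judgments.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Target Text and Discourse Unit",

"sec_num": "3.1.1"

},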
|
{ |
|
"text": "Our corpus consists of 10,000 documents corresponding to 30,000 sentences. The total number of clauses in this corpus is 39,032, and thus the average number of clauses in a document is 3.9. The total number of clause pairs is 59,426.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Text and Discourse Unit", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "One of our supposed applications of discourse parsing is to automatically generate a bird's eye view of a controversial topic as in Statement Map (Murakami et al., 2009) and Dispute Finder (Ennals et al., 2010) , which identify various relations between statements, including contradictory relations. We assume that expansion relations, such as elaboration and restatement, and temporal relations are not important for this purpose. This setting is similar to the work of Bethard et al. (2008) , which annotated temporal relations independently of causal relations. We also suppose that temporal relations can be annotated separately for NLP applications that require temporal information. We determined the tagset of discourse relations Note that we do not consider the direction of relations to simplify the annotation task for crowdsourcing. Table 1 shows examples of our tagset. Therefore, our task is to annotate clause pairs in a document with one of the discourse relations given above. Sample annotations of a document are shown below. Here, clause boundaries are shown by \"::\" and clause pairs that are not explicitly marked are allocated the \"OTHER\" relation. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 169, |
|
"text": "(Murakami et al., 2009)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 210, |
|
"text": "(Ennals et al., 2010)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 472, |
|
"end": 493, |
|
"text": "Bethard et al. (2008)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 845, |
|
"end": 852, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discourse Relation Tagset", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "We create a corpus with discourse annotations using two-stage crowdsourcing. We divide the annotation task into the following two subtasks: determining whether a clause pair has a discourse relation excluding \"OTHER,\" and then, ascertaining the type of discourse relation for a clause pair that passes the first stage. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Two-stage Crowdsourcing for Discourse Annotations", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "This subtask determines whether each clause pair in a document has one of the following discourse relations: Cause/Reason, Purpose, Condition, Ground, Contrast, and Concession (that is, all the relations except \"OTHER\"). Workers are shown examples of these relations and asked to determine only the existence thereof. In this subtask, an item presented to a worker at a particular time consists of all the judgments of clause pairs in a document. By adopting this approach, each worker considers the entire document when making his/her judgments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Stage 1: Judgment of Discourse Relation Existence", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "This subtask involves ascertaining the discourse relation type for a clause pair that passes the first stage. The result of this subtask is one of the seven lower types in our discourse relation tagset. Workers are shown examples of these types and then asked to select one of the relations. If a worker chooses \"OTHER,\" this corresponds to canceling the positive determination of the existence of the discourse relation in stage one.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Stage 2: Judgment of Discourse Relation Type", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "In this subtask, an item is the judgment of a clause pair. That is, if a document contains more than one clause pair that must be judged, the judgments for this document are divided into multiple items, although this is rare.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Stage 2: Judgment of Discourse Relation Type", |
|
"sec_num": "3.2.2" |
|
}, |
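
{

"text": "The two subtasks above can be pictured as two item-construction steps. The following minimal Python sketch shows one way the crowdsourcing items could be assembled from clause-segmented documents; the helper names and the item format are hypothetical and are not taken from the paper.\n\nfrom itertools import combinations\n\n# Minimal sketch: build stage-1 and stage-2 items from a clause-segmented document.\n\nLOWER_TYPES = ['Cause/Reason', 'Purpose', 'Condition', 'Ground',\n               'Contrast', 'Concession', 'Other']\n\ndef stage1_item(doc_id, clauses):\n    # Stage 1: one item per document, asking for a yes/no judgment on every\n    # clause pair (does some relation other than OTHER hold?).\n    pairs = list(combinations(range(len(clauses)), 2))\n    return {'doc_id': doc_id, 'clauses': clauses, 'pairs': pairs}\n\ndef stage2_items(doc_id, clauses, passed_pairs):\n    # Stage 2: one item per clause pair that passed stage 1; the worker picks\n    # one label from LOWER_TYPES (picking Other cancels the stage-1 judgment).\n    return [{'doc_id': doc_id, 'pair': (i, j),\n             'clause_a': clauses[i], 'clause_b': clauses[j],\n             'choices': LOWER_TYPES}\n            for (i, j) in passed_pairs]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Stage 2: Judgment of Discourse Relation Type",

"sec_num": "3.2.2"

},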
|
{ |
|
"text": "We conducted an experiment of the two-stage crowdsourcing approach using Yahoo! Crowdsourcing. 1 To increase the reliability of the produced corpus, we set the number of workers for each item for each task to 10. The reason why we chose this value is as follows. While Snow et al. (2008) claimed that an average of 4 non-expert labels per item in order to emulate expert-level label quality, the quality of some tasks increased by increasing the number of workers to 10. We also tested hidden gold-standard items once every 10 items to examine worker's quality. If a worker failed these items in serial, he/she would have to take a test to continue the task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 96, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 287, |
|
"text": "Snow et al. (2008)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment and Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We obtained judgments for the 59,426 clause pairs in the 10,000 documents of our corpus in the first stage of crowdsourcing, i.e., the subtask of determining the existence of discourse relations. We calculated the probability of each label using GLAD 2 (Whitehill et al., 2009) , which was proved to be more reliable than the majority voting. This probability corresponds to the probability of discourse relation existence of each clause pair. Table 2 lists the results. We set a probability threshold to select those clause pairs whose types were to be judged in the second stage of crowdsourcing. With this threshold set to 0.01, 9,068 clause pairs (15.3% of all the clause pairs) were selected. The threshold was set fairly low to allow low-probability judgments to be re-examined in the second stage. The discourse relation types of the 9,068 clause pairs were determined in the second stage of crowdsourcing. We extended GLAD (Whitehill et al., 2009) for application to multi-class tasks, and calculated the probability of the labels of each clause pair. We assigned the label (discourse relation type) with the highest probability to each clause pair. Table 3 gives some statistics of the results. The second column in this table denotes the numbers of each discourse relation type, while the third column gives the numbers of each type of clause pair with a probability higher than 0.80. Table 4 gives statistics of the results when the lower discourse relation types are merged into the upper types. Table 5 shows some examples of the resulting annotations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 277, |
|
"text": "(Whitehill et al., 2009)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 931, |
|
"end": 955, |
|
"text": "(Whitehill et al., 2009)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 444, |
|
"end": 451, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1158, |
|
"end": 1165, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 1395, |
|
"end": 1402, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 1508, |
|
"end": 1515, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiment and Discussion", |
|
"sec_num": "3.3" |
|
}, |
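
{

"text": "The aggregation and thresholding steps just described can be summarized in a short sketch. Note that the label probabilities in the paper are estimated with GLAD and a multi-class extension of it; the simple worker-vote fractions below are only a stand-in for those posteriors, and all names are hypothetical.\n\nfrom collections import Counter\n\nEXIST_THRESHOLD = 0.01   # stage 1: pass the pair on to stage 2\nTYPE_THRESHOLD = 0.80    # stage 2: keep only high-confidence labels\n\ndef vote_probability(labels):\n    # Fraction of workers giving each label (stand-in for GLAD posteriors).\n    counts = Counter(labels)\n    total = sum(counts.values())\n    return {label: c / total for label, c in counts.items()}\n\ndef select_stage2_pairs(stage1_votes):\n    # stage1_votes maps a clause pair to the workers' True/False judgments.\n    return [pair for pair, labels in stage1_votes.items()\n            if vote_probability(labels).get(True, 0.0) >= EXIST_THRESHOLD]\n\ndef finalize_types(stage2_votes):\n    # Assign the most probable relation type; flag low-confidence pairs.\n    result = {}\n    for pair, labels in stage2_votes.items():\n        probs = vote_probability(labels)\n        best = max(probs, key=probs.get)\n        result[pair] = (best, probs[best] >= TYPE_THRESHOLD)\n    return result",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiment and Discussion",

"sec_num": "3.3"

},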
|
{ |
|
"text": "Carrying out the two separate subtasks using crowdsourcing took approximately three hours and five hours with 1,458 and 1,100 workers, respectively. If we conduct this task at a single stage, it would take approximately 33 (5 hours / 0.153) hours. It would be four times longer than our two-stage approach. Such single-stage approach is also not robust since it does not have a double check mechanism, with which the two-stage approach is equipped. We spent 111 thousand yen and 113 thousand yen (approximately 1,100 USD, respectively) for these subtasks, which would be extremely less expensive than the projects of conventional discourse annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment and Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "For the examples in Table 5 , we confirmed that the discourse relation types of the top four examples were surely correct. However, we judged the type (Contrast) of the bottom example as incorrect. Since the second clause is an instantiation of the first clause, the correct type should be \"Other.\" We found such errors especially in the clause pairs with a probability lower than 0.80.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 27, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiment and Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "To verify the usefulness of the acquired corpus with discourse annotations, we developed a supervised discourse parser based on the corpus, and evaluated its performance. We built two discourse parsers using the annotations of the lower and upper discourse relation types, respectively. From the annotations in the first stage of crowdsourcing (i.e., judging the existence of discourse relations), we assigned annotations with a probability less than 0.01 as \"OTHER.\" Of the annotations acquired in the second stage (i.e., judging discourse relation types), we adopted those with a probability greater than 0.80 and discarded the rest. After this preprocessing, we obtained 58,135 (50,358 + 7,777) instances of clause pairs for the lower-type discourse parser and 58,521 (50,358 + 8,163) Table 5 : Examples of Annotations. The first column denotes the estimated label probability and the second column denotes the number of workers that assigned the designated type. In the fourth column, the clause pair annotated with the type is marked with ([ ] in English translations).", |
|
"cite_spans": [ |
|
{ |
|
"start": 771, |
|
"end": 787, |
|
"text": "(50,358 + 8,163)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 788, |
|
"end": 795, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Development of Discourse Parser based on Acquired Discourse Corpus", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "discourse parser. Of these, 4,024 (6.9%) and 4,410 (7.5%) instances, respectively, had one of the types besides \"OTHER.\" We conducted experiments using five-fold cross validation on these instances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Development of Discourse Parser based on Acquired Discourse Corpus", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To extract features of machine learning, we applied the Japanese morphological analyzer, JUMAN, 3 and the Japanese dependency parser, KNP, 4 to the corpus. We used the features listed in Table 6 , which are usually used for discourse parsing.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 194, |
|
"text": "Table 6", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Development of Discourse Parser based on Acquired Discourse Corpus", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We adopted Opal (Yoshinaga and Kitsuregawa, 2010) 5 for the machine learning implementation. This tool enables online learning using a polynomial kernel. As parameters for Opal, we used the passiveaggressive algorithm (PA-I) with a polynomial kernel of degree two as a learner and the extension to multi-class classification (Matsushima et al., 2010) . The numbers of classes were seven and three for the lower-and upper-type discourse parsers, respectively. We set the aggressiveness parameter C to 0.001, which generally achieves good performance for many classification tasks. Other parameters were set to the default values of Opal.", |
|
"cite_spans": [ |
|
{ |
|
"start": 325, |
|
"end": 350, |
|
"text": "(Matsushima et al., 2010)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Development of Discourse Parser based on Acquired Discourse Corpus", |
|
"sec_num": "4" |
|
}, |
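
{

"text": "As a rough stand-in for this training setup, the sketch below uses scikit-learn rather than Opal itself: the degree-2 polynomial kernel is approximated with explicit degree-2 feature combinations, and the PA-I learner with a linear passive-aggressive classifier. This is an illustration under those assumptions, not the authors' implementation.\n\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.pipeline import make_pipeline\n\ndef build_parser_model():\n    return make_pipeline(\n        DictVectorizer(sparse=False),              # feature dicts -> vectors\n        PolynomialFeatures(degree=2),              # mimic the degree-2 polynomial kernel\n        PassiveAggressiveClassifier(C=0.001,       # aggressiveness parameter from the paper\n                                    loss='hinge'), # hinge loss corresponds to PA-I\n    )\n\n# Usage (feature dicts as in Table 6, labels such as 'Cause/Reason'):\n# model = build_parser_model()\n# model.fit(train_feature_dicts, train_labels)\n# predictions = model.predict(test_feature_dicts)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Development of Discourse Parser based on Acquired Discourse Corpus",

"sec_num": "4"

},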
|
{ |
|
"text": "To measure the performance of the discourse parsers, we adopted precision, recall and their harmonic mean (F1). These metrics were calculated as the proportion of the number of correct clause pairs to the Name Description clause distance clause distance between two clauses sentence distance sentence distance between two clauses bag of words bag of words (lemmas) for each clause predicate a content word (lemma) of the predicate of each clause conjugation form of predicate a conjugation form of the predicate of each clause conjunction a conjunction if it is located at the beginning of a clause word overlapping ratio an overlapping ratio of words between the two clauses clause type a lexical type output by KNP for each clause (about 100 types) topic marker existence existence of a topic marker in each clause topic marker cooccurrence existence of a topic marker in both clauses Tables 7 and 8 give the accuracies for the lower-and upper-type discourse parsers, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 887, |
|
"end": 901, |
|
"text": "Tables 7 and 8", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Development of Discourse Parser based on Acquired Discourse Corpus", |
|
"sec_num": "4" |
|
}, |
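
{

"text": "To make the feature set and the evaluation metrics concrete, the following sketch computes a few of the Table 6 features for a clause pair and the per-relation precision, recall and F1. The clause representation is a hypothetical simplification; the lexical and syntactic attributes in the paper come from JUMAN and KNP, which are not reproduced here.\n\nfrom sklearn.metrics import precision_recall_fscore_support\n\ndef pair_features(clause_a, clause_b):\n    # clause_* are dicts such as {'index': 0, 'sentence': 0, 'lemmas': [...]}.\n    words_a, words_b = set(clause_a['lemmas']), set(clause_b['lemmas'])\n    overlap = len(words_a & words_b) / max(len(words_a | words_b), 1)  # one possible definition\n    feats = {\n        'clause_distance': abs(clause_a['index'] - clause_b['index']),\n        'sentence_distance': abs(clause_a['sentence'] - clause_b['sentence']),\n        'word_overlap_ratio': overlap,\n    }\n    feats.update({f'bow_a={w}': 1 for w in words_a})\n    feats.update({f'bow_b={w}': 1 for w in words_b})\n    return feats\n\ndef evaluate(gold_labels, predicted_labels, relation_types):\n    # Per-type precision, recall and F1 over clause pairs.\n    p, r, f, _ = precision_recall_fscore_support(\n        gold_labels, predicted_labels, labels=relation_types, zero_division=0)\n    return dict(zip(relation_types, zip(p, r, f)))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Development of Discourse Parser based on Acquired Discourse Corpus",

"sec_num": "4"

},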
|
{ |
|
"text": "From Table 8 , we can see that our upper-type discourse parser achieved an F1 of 37.9% for contingency relations. It is difficult to compare our results with those in previous work due to the use of different data set and different languages. We, however, anticipate that our results would be comparable with those of state-of-the-art English discourse parsers. For example, the end-to-end discourse parser of Lin et al. (2012) achieved an F1 of 20.6% -46.8% on the Penn Discourse Treebank.", |
|
"cite_spans": [ |
|
{ |
|
"start": 410, |
|
"end": 427, |
|
"text": "Lin et al. (2012)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 8", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Development of Discourse Parser based on Acquired Discourse Corpus", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We also obtained a low F1 for comparison relations. This tendency is similar to the previous results on the Penn Discourse Treebank. The biggest cause of this low F1 is the lack of unambiguous explicit discourse connectives for these relations. Although there are explicit discourse connectives in Japanese, many of them have multiple meanings and cannot be used as a direct clue for discourse relation detection (e.g., as described in Kaneko and Bekki (2014) ). As reported in and other studies, the identification of implicit discourse relations are notoriously difficult. To improve its performance, we need to incorporate external knowledge sources other than the training data into the discourse parsers. A promising way is to use large-scale knowledge resources that are automatically acquired from raw corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 436, |
|
"end": 459, |
|
"text": "Kaneko and Bekki (2014)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Development of Discourse Parser based on Acquired Discourse Corpus", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We presented a rapid approach for building a corpus with discourse annotations and a discourse parser using two-stage crowdsourcing. The acquired corpus is made publicly available and can be used for research purposes. 6 This corpus can be used not only to build a discourse parser but also to evaluate its performance. The availability of the corpus with discourse annotations will accelerate the development and improvement of discourse parsing. In the future, we intend integrating automatically acquired knowledge from corpora into the discourse parsers to further enhance their performance. We also aim to apply our framework to other languages without available corpora with discourse annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "http://crowdsourcing.yahoo.co.jp/ 2 http://mplab.ucsd.edu/\u02dcjake/OptimalLabelingRelease1.0.3.tar.gz", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://nlp.ist.i.kyoto-u.ac.jp/EN/?JUMAN 4 http://nlp.ist.i.kyoto-u.ac.jp/EN/?KNP 5 http://www.tkl.iis.u-tokyo.ac.jp/\u02dcynaga/opal/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://nlp.ist.i.kyoto-u.ac.jp/EN/?DDLC", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Building a corpus of temporalcausal structure", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Corvey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Klingenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 6th International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "908--915", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven Bethard, William Corvey, Sara Klingenstein, and James H. Martin. 2008. Building a corpus of temporal- causal structure. In Proceedings of the 6th International Conference on Language Resources and Evaluation, pages 908-915.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Aggregated word pair features for implicit discourse relation disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Or", |
|
"middle": [], |
|
"last": "Biran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "69--73", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Or Biran and Kathleen McKeown. 2013. Aggregated word pair features for implicit discourse relation disam- biguation. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 69-73.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Building a discourse-tagged corpus in the framework of rhetorical structure theory", |
|
"authors": [ |
|
{ |
|
"first": "Lynn", |
|
"middle": [], |
|
"last": "Carlson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"Ellen" |
|
], |
|
"last": "Okurowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the Second SIGdial Workshop on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lynn Carlson, Daniel Marcu, and Mary Ellen Okurowski. 2001. Building a discourse-tagged corpus in the framework of rhetorical structure theory. In Proceedings of the Second SIGdial Workshop on Discourse and Dialogue.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "On the development of the RST Spanish treebank", |
|
"authors": [ |
|
{ |
|
"first": "Juan-Manuel", |
|
"middle": [], |
|
"last": "Iria Da Cunha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerardo", |
|
"middle": [], |
|
"last": "Torres-Moreno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sierra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 5th Linguistic Annotation Workshop (LAW V)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iria da Cunha, Juan-Manuel Torres-Moreno, and Gerardo Sierra. 2011. On the development of the RST Spanish treebank. In Proceedings of the 5th Linguistic Annotation Workshop (LAW V), pages 1-10.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Highlighting disputed claims on the web", |
|
"authors": [ |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Ennals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beth", |
|
"middle": [], |
|
"last": "Trushkowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Mark" |
|
], |
|
"last": "Agosta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 19th international conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "341--350", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rob Ennals, Beth Trushkowsky, and John Mark Agosta. 2010. Highlighting disputed claims on the web. In Proceedings of the 19th international conference on World Wide Web, pages 341-350.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Text-level discourse parsing with rich linguistic features", |
|
"authors": [ |
|
{ |
|
"first": "Vanessa", |
|
"middle": [], |
|
"last": "Wei Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graeme", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "60--68", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vanessa Wei Feng and Graeme Hirst. 2012. Text-level discourse parsing with rich linguistic features. In Proceed- ings of the 50th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 60-68. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Outsourcing FrameNet to the crowd", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Fossati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claudio", |
|
"middle": [], |
|
"last": "Giuliano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Tonelli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "742--747", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Fossati, Claudio Giuliano, and Sara Tonelli. 2013. Outsourcing FrameNet to the crowd. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 742-747.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "End-to-end discourse parser evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Sucheta", |
|
"middle": [], |
|
"last": "Ghosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Tonelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giuseppe", |
|
"middle": [], |
|
"last": "Riccardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Johansson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Fifth IEEE International Conference on Semantic Computing (ICSC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "169--172", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sucheta Ghosh, Sara Tonelli, Giuseppe Riccardi, and Richard Johansson. 2011. End-to-end discourse parser evaluation. In Fifth IEEE International Conference on Semantic Computing (ICSC), pages 169-172.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Building a diverse document leads corpus annotated with semantic relations", |
|
"authors": [ |
|
{ |
|
"first": "Masatsugu", |
|
"middle": [], |
|
"last": "Hangyo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of 26th Pacific Asia Conference on Language Information and Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "535--544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masatsugu Hangyo, Daisuke Kawahara, and Sadao Kurohashi. 2012. Building a diverse document leads corpus annotated with semantic relations. In Proceedings of 26th Pacific Asia Conference on Language Information and Computing, pages 535-544.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "HILDA: A discourse parser using support vector machine classification", |
|
"authors": [ |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Hernault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helmut", |
|
"middle": [], |
|
"last": "Prendinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitsuru", |
|
"middle": [], |
|
"last": "Ishizuka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Dialogue & Discourse", |
|
"volume": "1", |
|
"issue": "3", |
|
"pages": "1--33", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hugo Hernault, Helmut Prendinger, David duVerle, and Mitsuru Ishizuka. 2010. HILDA: A discourse parser using support vector machine classification. Dialogue & Discourse, 1(3):1-33.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "How good is the crowd at \"real\" WSD?", |
|
"authors": [ |
|
{ |
|
"first": "Jisup", |
|
"middle": [], |
|
"last": "Hong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Collin", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Baker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 5th Linguistic Annotation Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--37", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jisup Hong and Collin F. Baker. 2011. How good is the crowd at \"real\" WSD? In Proceedings of the 5th Linguistic Annotation Workshop, pages 30-37.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A novel discriminative framework for sentence-level discourse analysis", |
|
"authors": [ |
|
{ |
|
"first": "Shafiq", |
|
"middle": [], |
|
"last": "Joty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giuseppe", |
|
"middle": [], |
|
"last": "Carenini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "904--915", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shafiq Joty, Giuseppe Carenini, and Raymond Ng. 2012. A novel discriminative framework for sentence-level discourse analysis. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 904-915.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Combining intra-and multi-sentential rhetorical parsing for document-level discourse analysis", |
|
"authors": [ |
|
{ |
|
"first": "Shafiq", |
|
"middle": [], |
|
"last": "Joty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giuseppe", |
|
"middle": [], |
|
"last": "Carenini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yashar", |
|
"middle": [], |
|
"last": "Mehdad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "486--496", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shafiq Joty, Giuseppe Carenini, Raymond Ng, and Yashar Mehdad. 2013. Combining intra-and multi-sentential rhetorical parsing for document-level discourse analysis. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 486-496.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Building a Japanese corpus of temporal-causal-discourse structures based on SDRT for extracting causal relations", |
|
"authors": [ |
|
{ |
|
"first": "Kimi", |
|
"middle": [], |
|
"last": "Kaneko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Bekki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the EACL 2014 Workshop on Computational Approaches to Causality in Language (CAtoCL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--39", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kimi Kaneko and Daisuke Bekki. 2014. Building a Japanese corpus of temporal-causal-discourse structures based on SDRT for extracting causal relations. In Proceedings of the EACL 2014 Workshop on Computational Approaches to Causality in Language (CAtoCL), pages 33-39.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Leveraging synthetic discourse data via multi-task learning for implicit discourse relation recognition", |
|
"authors": [ |
|
{ |
|
"first": "Man", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengyu", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "476--485", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Man Lan, Yu Xu, and Zhengyu Niu. 2013. Leveraging synthetic discourse data via multi-task learning for implicit discourse relation recognition. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 476-485.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A PDTB-styled end-to-end discourse parser", |
|
"authors": [ |
|
{ |
|
"first": "Ziheng", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Hwee Tou Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Natural Language Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ziheng Lin, Hwee Tou Ng, and Min-Yen Kan. 2012. A PDTB-styled end-to-end discourse parser. Natural Language Engineering, pages 1-34.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Exact passive-aggressive algorithm for multiclass classification using support class", |
|
"authors": [ |
|
{ |
|
"first": "Shin", |
|
"middle": [], |
|
"last": "Matsushima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nobuyuki", |
|
"middle": [], |
|
"last": "Shimizu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuhiro", |
|
"middle": [], |
|
"last": "Yoshida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takashi", |
|
"middle": [], |
|
"last": "Ninomiya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Nakagawa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of 2010 SIAM International Conference on Data Mining (SDM2010)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "303--314", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shin Matsushima, Nobuyuki Shimizu, Kazuhiro Yoshida, Takashi Ninomiya, and Hiroshi Nakagawa. 2010. Exact passive-aggressive algorithm for multiclass classification using support class. In Proceedings of 2010 SIAM International Conference on Data Mining (SDM2010), pages 303-314.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Statement map: Assisting information credibility analysis by visualizing arguments", |
|
"authors": [ |
|
{ |
|
"first": "Koji", |
|
"middle": [], |
|
"last": "Murakami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Nichols", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suguru", |
|
"middle": [], |
|
"last": "Matsuyoshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asuka", |
|
"middle": [], |
|
"last": "Sumida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shouko", |
|
"middle": [], |
|
"last": "Masuda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 3rd Workshop on Information Credibility on the Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--50", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Koji Murakami, Eric Nichols, Suguru Matsuyoshi, Asuka Sumida, Shouko Masuda, Kentaro Inui, and Yuji Mat- sumoto. 2009. Statement map: Assisting information credibility analysis by visualizing arguments. In Pro- ceedings of the 3rd Workshop on Information Credibility on the Web, pages 43-50.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Divide and conquer: Crowdsourcing the creation of cross-lingual textual entailment corpora", |
|
"authors": [ |
|
{ |
|
"first": "Matteo", |
|
"middle": [], |
|
"last": "Negri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yashar", |
|
"middle": [], |
|
"last": "Mehdad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Giampiccolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Marchetti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "670--679", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matteo Negri, Luisa Bentivogli, Yashar Mehdad, Danilo Giampiccolo, and Alessandro Marchetti. 2011. Divide and conquer: Crowdsourcing the creation of cross-lingual textual entailment corpora. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 670-679.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Dizer: An automatic discourse analyzer for Brazilian Portuguese", |
|
"authors": [ |
|
{ |
|
"first": "Thiago Alexandre Salgueiro", |
|
"middle": [], |
|
"last": "Pardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Das Gra\u00e7as Volpe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia Helena Machado", |
|
"middle": [], |
|
"last": "Nunes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rino", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Advances in Artificial Intelligence-SBIA 2004", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "224--234", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thiago Alexandre Salgueiro Pardo, Maria das Gra\u00e7as Volpe Nunes, and Lucia Helena Machado Rino. 2004. Dizer: An automatic discourse analyzer for Brazilian Portuguese. In Advances in Artificial Intelligence-SBIA 2004, pages 224-234. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Using syntax to disambiguate explicit discourse connectives in text", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the ACL-IJCNLP 2009 Conference Short Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Pitler and Ani Nenkova. 2009. Using syntax to disambiguate explicit discourse connectives in text. In Proceedings of the ACL-IJCNLP 2009 Conference Short Papers, pages 13-16.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Automatic sense prediction for implicit discourse relations in text", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Annie", |
|
"middle": [], |
|
"last": "Louis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "683--691", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Pitler, Annie Louis, and Ani Nenkova. 2009. Automatic sense prediction for implicit discourse relations in text. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP, pages 683-691.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The Penn discourse treebank 2.0", |
|
"authors": [ |
|
{ |
|
"first": "Rashmi", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Dinesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Miltsakaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Livio", |
|
"middle": [], |
|
"last": "Robaldo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Webber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 6th International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2961--2968", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rashmi Prasad, Nikhil Dinesh, Alan Lee, Eleni Miltsakaki, Livio Robaldo, Aravind Joshi, and Bonnie Webber. 2008. The Penn discourse treebank 2.0. In Proceedings of the 6th International Conference on Language Resources and Evaluation, pages 2961-2968.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Automatic slide generation based on discourse structure analysis", |
|
"authors": [ |
|
{ |
|
"first": "Tomohide", |
|
"middle": [], |
|
"last": "Shibata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of Second International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "754--766", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomohide Shibata and Sadao Kurohashi. 2005. Automatic slide generation based on discourse structure analysis. In Proceedings of Second International Joint Conference on Natural Language Processing, pages 754-766.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Cheap and fast -but is it good? evaluating non-expert annotations for natural language tasks", |
|
"authors": [ |
|
{ |
|
"first": "Rion", |
|
"middle": [], |
|
"last": "Snow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "O'Connor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "254--263", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rion Snow, Brendan O'Connor, Daniel Jurafsky, and Andrew Ng. 2008. Cheap and fast -but is it good? evaluating non-expert annotations for natural language tasks. In Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing, pages 254-263.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "The Potsdam commentary corpus", |
|
"authors": [ |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Stede", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 2004 ACL Workshop on Discourse Annotation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "96--102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manfred Stede. 2004. The Potsdam commentary corpus. In Proceedings of the 2004 ACL Workshop on Discourse Annotation, pages 96-102.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "An effective discourse parser that uses rich linguistic information", |
|
"authors": [ |
|
{ |
|
"first": "Rajen", |
|
"middle": [], |
|
"last": "Subba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barbara", |
|
"middle": [ |
|
"Di" |
|
], |
|
"last": "Eugenio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "566--574", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rajen Subba and Barbara Di Eugenio. 2009. An effective discourse parser that uses rich linguistic information. In Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics, pages 566-574.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Whose vote should count more: Optimal integration of labels from labelers of unknown expertise", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Whitehill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Ruvolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting Fan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Bergsma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Javier", |
|
"middle": [], |
|
"last": "Movellan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "22", |
|
"issue": "", |
|
"pages": "2035--2043", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Whitehill, Paul Ruvolo, Ting fan Wu, Jacob Bergsma, and Javier Movellan. 2009. Whose vote should count more: Optimal integration of labels from labelers of unknown expertise. In Y. Bengio, D. Schuurmans, J. Lafferty, C. K. I. Williams, and A. Culotta, editors, Advances in Neural Information Processing Systems 22, pages 2035-2043.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Representing discourse coherence: A corpus-based study", |
|
"authors": [ |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Gibson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Computational Linguistics", |
|
"volume": "31", |
|
"issue": "2", |
|
"pages": "249--287", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Florian Wolf and Edward Gibson. 2005. Representing discourse coherence: A corpus-based study. Computational Linguistics, 31(2):249-287.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Kernel slicing: Scalable online training with conjunctive features", |
|
"authors": [ |
|
{ |
|
"first": "Naoki", |
|
"middle": [], |
|
"last": "Yoshinaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masaru", |
|
"middle": [], |
|
"last": "Kitsuregawa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 23rd International Conference on Computational Linguistics (COLING2010)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1245--1253", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naoki Yoshinaga and Masaru Kitsuregawa. 2010. Kernel slicing: Scalable online training with conjunctive fea- tures. In Proceedings of the 23rd International Conference on Computational Linguistics (COLING2010), pages 1245-1253.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Fine-grained sentiment analysis with structural features", |
|
"authors": [ |
|
{ |
|
"first": "C\u00e4cilia", |
|
"middle": [], |
|
"last": "Zirn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mathias", |
|
"middle": [], |
|
"last": "Niepert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heiner", |
|
"middle": [], |
|
"last": "Stuckenschmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of 5th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "336--344", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C\u00e4cilia Zirn, Mathias Niepert, Heiner Stuckenschmidt, and Michael Strube. 2011. Fine-grained sentiment analysis with structural features. In Proceedings of 5th International Joint Conference on Natural Language Processing, pages 336-344.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "the surgery of my father ended safely] [(I) am relieved a little bit] is tailwind to live,] [there is also headwind.]" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"text": "Discourse relation tagset with examples.", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"text": "Number of clause pairs resulting from the judgments of discourse relation existence.", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"text": "Results of the judgments of lower discourse relation types.", |
|
"html": null, |
|
"content": "<table><tr><td>Upper type</td><td>All</td><td>prob > 0.8</td></tr><tr><td colspan=\"3\">CONTINGENCY 4,439 3,993 (90.0%)</td></tr><tr><td>COMPARISON</td><td>516</td><td>417 (80.8%)</td></tr><tr><td colspan=\"3\">Sum of the above discourse relations 4,955 4,410 (89.0%)</td></tr><tr><td colspan=\"3\">OTHER 4,113 3,753 (91.2%)</td></tr><tr><td colspan=\"3\">Total 9,068 8,163 (90.0%)</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"text": "Results of the judgments of upper discourse relation types.", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"text": "Since the flower blooms in the fifth lunar month] [it is called \"Satsuki.\"] ...", |
|
"html": null, |
|
"content": "<table><tr><td>Prob # W</td><td>Type</td><td>Document</td></tr><tr><td colspan=\"2\">1.00 6/10 Cause/Reason</td><td/></tr><tr><td colspan=\"3\">... [0.99 4/10 Condition</td></tr><tr><td/><td/><td>[If you click the balloon on the map] [you can see the recommended</td></tr><tr><td/><td/><td>route] ...</td></tr><tr><td>0.81 3/10</td><td>Purpose</td><td/></tr><tr><td/><td/><td>... [And seeking \"Great harvest\"] [each country is engaged in a war]</td></tr><tr><td colspan=\"2\">0.61 2/10 Cause/Reason</td><td/></tr><tr><td/><td/><td>... [by transmitting power to the front and rear axle with the combina-</td></tr><tr><td/><td/><td>tion of gears and shafts] [(it) drives the four wheels.]</td></tr><tr><td>0.54 3/10</td><td>Contrast</td><td/></tr><tr><td/><td/><td>... [a scramble for customers by department stores would be severe.]</td></tr><tr><td/><td/><td>[What comes out is the possibility of the closure of Fukuoka Mit-</td></tr><tr><td/><td/><td>sukoshi.]</td></tr><tr><td/><td/><td>instances of clause pairs for the upper-type</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF8": { |
|
"type_str": "table", |
|
"text": "Features for our discourse parsers.", |
|
"html": null, |
|
"content": "<table><tr><td>Type</td><td/><td>Precision</td><td/><td>Recall</td><td>F1</td></tr><tr><td colspan=\"3\">Cause/Reason 0.623 (441/708)</td><td colspan=\"2\">0.240 (441/1,839)</td><td>0.346</td></tr><tr><td>Purpose</td><td>0.489</td><td>(44/90)</td><td>0.075</td><td>(44/584)</td><td>0.131</td></tr><tr><td>Condition</td><td colspan=\"2\">0.581 (256/441)</td><td colspan=\"2\">0.277 (256/925)</td><td>0.375</td></tr><tr><td>Ground</td><td>0.000</td><td>(0/12)</td><td>0.000</td><td>(0/273)</td><td>0.000</td></tr><tr><td>Contrast</td><td>0.857</td><td>(6/7)</td><td>0.017</td><td>(6/354)</td><td>0.033</td></tr><tr><td>Concession</td><td>0.000</td><td>(0/0)</td><td>0.000</td><td>(0/49)</td><td>0.000</td></tr><tr><td>Other</td><td colspan=\"5\">0.944 (53,702/56,877) 0.992 (53,702/54,111) 0.968</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF9": { |
|
"type_str": "table", |
|
"text": "Performance of our lower-type discourse parser.", |
|
"html": null, |
|
"content": "<table><tr><td>Type</td><td/><td>Precision</td><td/><td>Recall</td><td>F1</td></tr><tr><td colspan=\"3\">CONTINGENCY 0.625 (1,084/1,735)</td><td colspan=\"2\">0.272 (1,084/3,993)</td><td>0.379</td></tr><tr><td colspan=\"2\">COMPARISON 0.412</td><td>(7/17)</td><td>0.017</td><td>(7/417)</td><td>0.032</td></tr><tr><td>OTHER</td><td colspan=\"5\">0.942 (53,454/56,769) 0.988 (53,454/54,111) 0.964</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF10": { |
|
"type_str": "table", |
|
"text": "Performance of our upper-type discourse parser. number of all recognized or gold-standard ones for each discourse relation type.", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |