|
{ |
|
"paper_id": "N13-1007", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:39:54.549321Z" |
|
}, |
|
"title": "Minimally Supervised Method for Multilingual Paraphrase Extraction from Definition Sentences on the Web", |
|
"authors": [ |
|
{ |
|
"first": "Yulan", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Information Analysis Laboratory", |
|
"institution": "Universal Communication Research Institute National Institute of Information and Communications Technology (NICT)", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chikara", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Information Analysis Laboratory", |
|
"institution": "Universal Communication Research Institute National Institute of Information and Communications Technology (NICT)", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Torisawa", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Information Analysis Laboratory", |
|
"institution": "Universal Communication Research Institute National Institute of Information and Communications Technology (NICT)", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Takao", |
|
"middle": [], |
|
"last": "Kawai", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Information Analysis Laboratory", |
|
"institution": "Universal Communication Research Institute National Institute of Information and Communications Technology (NICT)", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "\u00b6", |
|
"middle": [], |
|
"last": "Jun'ichi Kazama", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Information Analysis Laboratory", |
|
"institution": "Universal Communication Research Institute National Institute of Information and Communications Technology (NICT)", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Stijn", |
|
"middle": [], |
|
"last": "De Saeger", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Information Analysis Laboratory", |
|
"institution": "Universal Communication Research Institute National Institute of Information and Communications Technology (NICT)", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We propose a minimally supervised method for multilingual paraphrase extraction from definition sentences on the Web. Hashimoto et al. (2011) extracted paraphrases from Japanese definition sentences on the Web, assuming that definition sentences defining the same concept tend to contain paraphrases. However, their method requires manually annotated data and is language dependent. We extend their framework and develop a minimally supervised method applicable to multiple languages. Our experiments show that our method is comparable to Hashimoto et al.'s for Japanese and outperforms previous unsupervised methods for English, Japanese, and Chinese, and that our method extracts 10,000 paraphrases with 92% precision for English, 82.5% precision for Japanese, and 82% precision for Chinese. Defini&on sentences Defini&on pairs Paraphrase candidates Ranked paraphrase candidates Classifier Web Defini&on Extrac&on (Sec&on 2.1) Paraphrase Extrac&on (Sec&on 2.2) Ranking by Score Automa&cally constructed training data Web Wikipedia", |
|
"pdf_parse": { |
|
"paper_id": "N13-1007", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We propose a minimally supervised method for multilingual paraphrase extraction from definition sentences on the Web. Hashimoto et al. (2011) extracted paraphrases from Japanese definition sentences on the Web, assuming that definition sentences defining the same concept tend to contain paraphrases. However, their method requires manually annotated data and is language dependent. We extend their framework and develop a minimally supervised method applicable to multiple languages. Our experiments show that our method is comparable to Hashimoto et al.'s for Japanese and outperforms previous unsupervised methods for English, Japanese, and Chinese, and that our method extracts 10,000 paraphrases with 92% precision for English, 82.5% precision for Japanese, and 82% precision for Chinese. Defini&on sentences Defini&on pairs Paraphrase candidates Ranked paraphrase candidates Classifier Web Defini&on Extrac&on (Sec&on 2.1) Paraphrase Extrac&on (Sec&on 2.2) Ranking by Score Automa&cally constructed training data Web Wikipedia", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automatic paraphrasing has been recognized as an important component for NLP systems, and many methods have been proposed to acquire paraphrase knowledge (Lin and Pantel, 2001 ; Barzilay and McKeown, 2001; Shinyama et al., 2002; Barzilay and Lee, 2003; Dolan et al., 2004; Callison-Burch, 2008; Hashimoto et al., 2011; Fujita et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 175, |
|
"text": "(Lin and Pantel, 2001", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 205, |
|
"text": "Barzilay and McKeown, 2001;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 228, |
|
"text": "Shinyama et al., 2002;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 252, |
|
"text": "Barzilay and Lee, 2003;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 272, |
|
"text": "Dolan et al., 2004;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 273, |
|
"end": 294, |
|
"text": "Callison-Burch, 2008;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 318, |
|
"text": "Hashimoto et al., 2011;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 339, |
|
"text": "Fujita et al., 2012)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose a minimally supervised method for multilingual paraphrase extraction. Hashimoto et al. (2011) developed a method to extract paraphrases from definition sentences on the Web, based on their observation that definition sentences defining the same concept tend to contain many paraphrases. Their method consists of two steps; they extract definition sentences from the Web, and extract phrasal", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 104, |
|
"text": "Hashimoto et al. (2011)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(1) a. Paraphrasing is the use of your own words to express the author's ideas without changing the meaning. b. Paraphrasing is defined as a process of transforming an expression into another while keeping its meaning intact. (2) a. \u8a00\u3044\u63db\u3048\u3068\u306f\u3001\u3042\u308b\u8868\u73fe\u3092\u305d\u306e\u610f\u5473\u5185\u5bb9\u3092\u5909\u3048\u305a\u306b\u5225\u306e \u8868\u73fe\u306b\u7f6e\u304d\u63db\u3048\u308b\u3053\u3068\u3092\u8a00\u3044\u307e\u3059\u3002 (Paraphrasing refers to the replacement of an expression into another without changing the semantic content.) b. \u8a00 \u3044 \u63db \u3048 \u3068 \u306f \u3001 \u3042 \u308b \u8a00 \u8a9e \u8868 \u73fe \u3092 \u3067 \u304d \u308b \u3060 \u3051\u610f\u5473\u3084\u5185\u5bb9\u3092\u4fdd\u3063\u305f\u307e\u307e\u540c \u4e00 \u8a00 \u8a9e \u306e \u5225 \u306e \u8868 \u73fe \u306b \u5909 \u63db \u3059 \u308b \u51e6 \u7406 \u3067 \u3042 \u308b \u3002 (Paraphrasing is a process of transforming an expression into another of the same language while preserving the meaning and content as much as possible.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(3) a. \u610f\u8bd1\u662f\u6307\u8bd1\u8005\u5728\u4e0d\u6539\u53d8\u539f\u6587\u610f\u601d\u7684\u524d\u63d0\u4e0b\uff0c\u5b8c\u5168\u6539\u53d8\u539f \u6587 \u7684 \u53e5 \u5b50 \u7ed3 \u6784 \u3002 (Paraphrasing refers to the transformation of sentence structure by the translator without changing the meaning of original text.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "b. \u610f\u8bd1\u662f\u6307\u53ea\u4fdd\u6301\u539f\u6587\u5185\u5bb9\uff0c\u4e0d\u4fdd\u6301\u539f\u6587\u5f62\u5f0f\u7684\u7ffb\u8bd1\u65b9\u6cd5\u3002 (Paraphrasing is a translation method of keeping the content of original text but not keeping the expression.) paraphrases from the definition sentences. Both steps require supervised classifiers trained by manually annotated data, and heavily depend on their target language. However, the basic idea is actually language-independent. Figure 1 gives examples of definition sentences on the Web that define the same concept in English, Japanese, and Chinese (with English translation). As indicated by underlines, each definition pair has a phrasal paraphrase. We aim at extending Hashimoto et al.'s method to a minimally supervised method, thereby enabling acquisition of phrasal paraphrases within one language, but in different languages without manually annotated data. The first contribution of our work is to develop a minimally supervised method for multilingual definition extraction that uses a classifier distinguishing definition from non-definition. The classifier is learnt from the first sentences in Wikipedia articles, which can be regarded as the definition of the title of Wikipedia article (Kazama and Torisawa, 2007) and hence can be used as positive examples. Our method relies on a POS tagger, a dependency parser, a NER tool, noun phrase chunking rules, and frequency thresholds for each language, in addition to Wikipedia articles, which can be seen as a manually annotated knowledge base. However, our method needs no additional manual annotation particularly for this task and thus we categorize our method as a minimally supervised method. On the other hand, Hashimoto et al.'s method heavily depends on the properties of Japanese like the assumption that characteristic expressions of definition sentences tend to appear at the end of sentence in Japanese. 
We show that our method is applicable to English, Japanese, and Chinese, and that its performance is comparable to state-of-the-art supervised methods . Since the three languages are very different we believe that our definition extraction method is applicable to any language as long as Wikipedia articles of the language exist.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1137, |
|
"end": 1164, |
|
"text": "(Kazama and Torisawa, 2007)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 364, |
|
"end": 372, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The second contribution of our work is to develop a minimally supervised method for multilingual paraphrase extraction from definition sentences. Again, Hashimoto et al.'s method utilizes a supervised classifier trained with annotated data particularly prepared for this task. We eliminate the need for annotation and instead introduce a method that uses a novel similarity measure considering the occurrence of phrase fragments in global contexts. Our paraphrase extraction method is mostly language-independent and, through experiments for the three languages, we show that it outperforms unsupervised methods (Pa\u015fca and Dienes, 2005; Koehn et al., 2007) and is comparable to Hashimoto et al.'s supervised method for Japanese.", |
|
"cite_spans": [ |
|
{ |
|
"start": 612, |
|
"end": 636, |
|
"text": "(Pa\u015fca and Dienes, 2005;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 637, |
|
"end": 656, |
|
"text": "Koehn et al., 2007)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous methods for paraphrase (and entailment) extraction can be classified into a distributional similarity based approach (Lin and Pantel, 2001; Geffet and Dagan, 2005; Bhagat et al., 2007; Szpektor and Dagan, 2008; Hashimoto et al., 2009) and a parallel corpus based approach (Barzilay and McKeown, 2001; Shinyama et al., 2002; Barzilay and Lee, 2003; Dolan et al., 2004; Callison-Burch, 2008) . The former can exploit large scale monolingual corpora, but is known to be unable to distinguish paraphrase pairs from antonymous pairs (Lin et al., 2003) . The latter rarely mistakes antonymous pairs for paraphrases, but preparing parallel corpora is expensive. As with Hashimoto et al. (2011) , our method is a kind of parallel corpus approach in that it uses definition pairs as a parallel corpus. However, our method does not suffer from a high labor cost of preparing parallel corpora, since it can automatically collect definition pairs from the Web on a large scale. The difference between ours and Hashimoto et al.'s is that our method requires no manual labeling of data and is mostly language-independent.", |
|
"cite_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 48, |
|
"text": "(and entailment)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 126, |
|
"end": 148, |
|
"text": "(Lin and Pantel, 2001;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 172, |
|
"text": "Geffet and Dagan, 2005;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 193, |
|
"text": "Bhagat et al., 2007;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 219, |
|
"text": "Szpektor and Dagan, 2008;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 220, |
|
"end": 243, |
|
"text": "Hashimoto et al., 2009)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 309, |
|
"text": "(Barzilay and McKeown, 2001;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 310, |
|
"end": 332, |
|
"text": "Shinyama et al., 2002;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 356, |
|
"text": "Barzilay and Lee, 2003;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 376, |
|
"text": "Dolan et al., 2004;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 377, |
|
"end": 398, |
|
"text": "Callison-Burch, 2008)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 537, |
|
"end": 555, |
|
"text": "(Lin et al., 2003)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 672, |
|
"end": 695, |
|
"text": "Hashimoto et al. (2011)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our method first extracts definition sentences from the Web, and then extracts paraphrases from the definition sentences, as illustrated in Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 148, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our method learns a classifier that classifies sentences into definition and non-definition using automatically constructed training data, TrDat. TrDat's positive examples, Pos, are the first sentences of Wikipedia articles and the negative examples, Neg, are randomly sampled Web sentences. The former can be seen as definition, while the chance that the sentences in the latter are definition is quite small.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Construction of Training Data", |
|
"sec_num": "2.1.1" |
|
}, |
|
{ |
|
"text": "Our definition extraction not only distinguishes definition from non-definition but also identities the defined term of a definition sentence, and in the paraphrase extraction step our method couples two definition sentences if their defined terms are identical. For example, the defined terms of (1a) and (1b) in Figure 1 are both \"Paraphrasing\" and thus the two definition sentences are coupled. For Pos, we mark up the title of Wikipedia article as the defined term. For Neg, we randomly select a noun phrase in a sen- ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 322, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic Construction of Training Data", |
|
"sec_num": "2.1.1" |
|
}, |
|
{ |
|
"text": "As features, we use patterns that are characteristic of definition (definition patterns) and those that are unlikely to be a part of definition (non-definition patterns). Patterns are either N-grams, subsequences, or dependency subtrees, and are mined automatically from TrDat. Table 1 shows examples of patterns mined by our method. In (A) of Table 1 , \"\u02c6\" is a symbol representing the beginning of a sentence. In (B), \"*\" represents a wildcard that matches any number of arbitrary words. Patterns are represented by either their words' surface form, base form, or POS. (Chinese words do not inflect and thus we do not use the base form for Chinese.)", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 278, |
|
"end": 285, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 351, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Extraction and Learning", |
|
"sec_num": "2.1.2" |
|
}, |
|
{ |
|
"text": "We assume that definition patterns are frequent in Pos but are infrequent in Neg, and non-definition patterns are frequent in Neg but are infrequent in Pos. To see if a given pattern \u03c6 is likely to be a definition pattern, we measure \u03c6's probability rate Rate(\u03c6). If the probability rate of \u03c6 is large, \u03c6 tends to be a definition pattern. The probability rate of \u03c6 is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Extraction and Learning", |
|
"sec_num": "2.1.2" |
|
}, |
|
{ |
|
"text": "Rate(\u03c6) = f req(\u03c6, Pos)/|Pos| f req(\u03c6, Neg)/|Neg| , if f req(\u03c6, Neg) = 0.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Extraction and Learning", |
|
"sec_num": "2.1.2" |
|
}, |
|
{ |
|
"text": "Here, freq(\u03c6, Pos) = |{s \u2208 Pos : \u03c6 \u2286 s}| and freq(\u03c6, Neg) = |{s \u2208 Neg : \u03c6 \u2286 s}|. We write \u03c6 \u2286 s if sentence s contains \u03c6. If f req(\u03c6, Neg) = 0, Rate(\u03c6) is set to the largest value of all the patterns' Rate values. Only patterns whose Rate is more than or equal to a Rate threshold \u03c1 pos and whose f req(\u03c6, Pos) is more than or equal to a frequency threshold are regarded as definition patterns. Similarly, we check if \u03c6 is likely to be a non-definition pattern. Only patterns whose Rate is less or equal to a Rate threshold \u03c1 neg and whose f req(\u03c6, Neg) is more than or equal to a frequency threshold are regarded as non-definition patterns. The probability rate is based on the growth rate (Dong and Li, 1999) . \u03c1 pos and \u03c1 neg are set to 2 and 0.5, while the frequency threshold is set differently according to languages, pattern types (N-gram, subsequence, and subtree), representation (surface, base, and POS), and data (Pos and Neg), as in Table 2 . The thresholds in Table 2 were determined manually, but not really arbitrarily. Basically they were determined according to the frequency of each pattern in our data (e.g. how frequently the surface N-gram of English appears in English positive training samples (Pos)).", |
|
"cite_spans": [ |
|
{ |
|
"start": 691, |
|
"end": 710, |
|
"text": "(Dong and Li, 1999)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 945, |
|
"end": 952, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 973, |
|
"end": 980, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Extraction and Learning", |
|
"sec_num": "2.1.2" |
|
}, |
|
{ |
|
"text": "Below, we detail how patterns are acquired. First, we acquire N-gram patterns. Then, subsequence patterns are acquired using the N-gram patterns as input. Finally, subtree patterns are acquired using the subsequence patterns as input.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Extraction and Learning", |
|
"sec_num": "2.1.2" |
|
}, |
|
{ |
|
"text": "We collect N-gram patterns from TrDat with N ranging from 2 to 6. We filter out N-grams using thresholds on the Rate and frequency, and regard those that are kept as definition or non-definition N-grams.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "N-gram patterns", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We generate subsequence patterns as ordered combinations of N-grams with the wild card \"*\" inserted between them (we use two or three N-grams for a subsequence). Then, we check each of the generated subsequences and keep it if there exists a sentence in TrDat that contains the subsequence and whose root node is contained in the subsequence. For example, subsequence \"[term] is a * in the\" is kept if a term-marked sentence like \"[term] is a baseball player in the Dominican Republic.\" exists in TrDat. Then, patterns are filtered out using thresholds on the Rate and frequency as we did for N-grams.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subsequence patterns", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Subtree patterns For each definition and nondefinition subsequence, we retrieve all the termmarked sentences that contain the subsequence from TrDat, and extract a minimal dependency subtree that covers all the words of the subsequence from each retrieved sentence. For example, assume that we retrieve a term-marked sentence \"[term] is usually defined as the way of life of a group of people.\" for subsequence \"[term] is * defined as the\". Then we extract from the sentence the minimal dependency subtree in the left side of (C) of Table 1 . Note that all the words of the subsequence are contained in the subtree, and that in the subtree a node (\"way\") that is not a part of the subsequence is replaced with its dependency label (\"NP\") assigned by the dependency parser. The patterns are filtered out using thresholds on the Rate and frequency.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 533, |
|
"end": 540, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Subsequence patterns", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We train a SVM classifier with a linear kernel, using binary features that indicate the occurrence of the patterns described above in a target sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subsequence patterns", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In theory, we could feed all the features to the SVM classifier and let the classifier pick informative features. But we restricted the feature set for practical reasons: the number of features would become tremendously large. There are two reasons for this. First, the number of sentences in our automatically acquired training data is huge (2,439,257 positive sentences plus 5,000,000 negative sentences for English, 703,208 positive sentences plus 1,400,000 negative sentences for Japanese and 310,072 positive sentences plus 600,000 negative sentences for Chinese). Second, since each subsequence pattern is generated as a combination of two or three Ngram patterns and one subsequence pattern can generate one or more subtree patterns, using all possible features leads to a combinatorial explosion of features. Moreover, since the feature vector will be highly sparse with a huge number of infrequent features, SVM learning becomes very time consuming. In preliminary experiments we observed that when using all possible features the learning process took more than one week for each language. We therefore introduced the current feature selection method, in which the learning process finished in one day but 1 http://svmlight.joachims.org. still obtains good results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subsequence patterns", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We extract a large amount of definition sentences by applying this classifier to sentences in our Web archive. Because our classifier requires termmarked sentences (sentences in which the term being defined is marked) as input, we first have to identify all such defined term candidates for each sentence. For example, Figure 3 shows a case where a Web sentence has two NPs (two candidates of defined term). Basically we pick up NPs in a sentence by simple heuristic rules. For English, NPs are identified using TreeTagger (Schmid, 1995) and two NPs are merged into one when they are connected by \"for\" or \"of\". After applying this procedure recursively, the longest NPs are regarded as candidates of defined terms and term-marked sentences are generated. For Japanese, we first identify nouns that are optionally modified by adjectives as NPs, and allow two NPs connected by \"\u306e\" (of ), if any, to form a larger NP. For Chinese, nouns that are optionally modified by adjectives are considered as NPs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 523, |
|
"end": 537, |
|
"text": "(Schmid, 1995)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 319, |
|
"end": 327, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Definition Extraction from the Web", |
|
"sec_num": "2.1.3" |
|
}, |
|
{ |
|
"text": "Then, each term-marked sentence is given a feature vector and classified by the classifier. The termmarked sentence whose SVM score (the distance from the hyperplane) is the largest among those from the same original Web sentence is chosen as the final classification result for the original Web sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Definition Extraction from the Web", |
|
"sec_num": "2.1.3" |
|
}, |
|
{ |
|
"text": "We use all the Web sentences classified as definition and all the sentences in Pos for paraphrase extraction. First, we couple two definition sentences whose defined term is the same. We filter out definition sentence pairs whose cosine similarity of content word vectors is less than or equal to threshold C, which is set to 0.1. Then, we extract phrases from each definition sentence, and generate all possible phrase pairs from the coupled sentences. In this study, phrases are restricted to predicate phrases that consist of at least one dependency relation and in which all the constituents are consecutive in a", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "f 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The ratio of the number of words shared between two candidate phrases to the number of all of the words in the two phrases. Words are represented by either their surface form (f 1,1 ), base form (f 1,2 ) or POS (f 1,3 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The identity of the leftmost word (surface form (f 2,1 ), base form (f 2,2 ) or POS (f 2,3 )) between two candidate phrases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "f 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The same as f 2 except that we use the rightmost word. There are three corresponding subfunctions (f 3,1 to f 3,3 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The ratio of the number of words that appear in a candidate phrase segment of a definition sentence s 1 and in a segment that is NOT a part of the candidate phrase of another definition sentence s 2 to the number of all the words of s 1 's candidate phrase. Words are in their base form (f 4,1 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 4", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "f 5 The reversed (s 1 \u2194 s 2 ) version of f 4,1 (f 5,1 ). f 6", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 4", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The ratio of the number of words (the surface form) of a shorter candidate phrase to that of a longer one (f 6,1 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 4", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "f 7", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 4", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Cosine similarity between two definition sentences from which two candidate phrases are extracted. Only content words in the base form are used (f 7,1 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 4", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The ratio of the number of parent dependency subtrees that are shared by two candidate phrases to the number of all the parent dependency subtrees. The parent dependency subtrees are adjacent to the candidate phrases and represented by their surface form (f 8,1 ), base form (f 8,2 ), or POS (f 8,3 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 8", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The same as f 8 except that we use child dependency subtrees. There are 3 subfunctions (f 9,1 to f 9,3 ) of f 9 type.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 9", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The ratio of the number of context N-grams that are shared by two candidate phrases to the number of all the context Ngrams of both candidate phrases. The context N-grams are adjacent to the candidate phrases and represented by either the surface form, the base form, or POS. The N ranges from 1 to 3, and the context is either left-side or right-side. Thus, there are 18 subfunctions (3 \u00d7 3 \u00d7 2). sentence. Accordingly, if two definition sentences that are coupled have three such predicate phrases respectively, we get nine phrase pairs, for instance. A phrase pair extracted from a definition pair is a paraphrase candidate and is given a score that indicates the likelihood of being a paraphrase, Score. It consists of two similarity measures, local similarity and global similarity, which are detailed below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 10", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Local similarity Following Hashimoto et al., we assume that two candidate phrases (p 1 , p 2 ) tend to be a paraphrase if they are similar enough and/or their surrounding contexts are sufficiently similar. Then, we calculate the local similarity (localSim) of (p 1 , p 2 ) as the weighted sum of 37 similarity subfunctions that are grouped into 10 types (Table 3 .) For example, the f 1 type consists of three subfunctions, f 1,1 , f 1,2 , and f 1,3 . The 37 subfunctions are inspired by Hashimoto et al.'s features. Then, local-Sim is defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 354, |
|
"end": 362, |
|
"text": "(Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "f 10", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "localSim(p 1 , p 2 ) = max (d l ,dm)\u2208DP (p1,p2) ls(p 1 , p 2 , d l , d m ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 10", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 10", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ", ls(p 1 , p 2 , d l , d m ) = 10 i=1 k i j=1 w i,j \u00d7f i,j (p1,p2,d l ,dm) k i . DP (p 1 , p 2 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 10", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "is the set of all definition sentence pairs that contain (p 1 , p 2 ). (d l , d m ) is a definition sentence pair containing (p 1 , p 2 ). k i is the number of subfunctions of f i type. w i,j is the weight for f i,j . w i,j is uniformly set to 1 except for f 4,1 and f 5,1 , whose weight is set to \u22121 since they indicate the unlikelihood of (p 1 , p 2 )'s being a paraphrase. As the formula indicates, if there is more than one definition sentence pair that contains (p 1 , p 2 ), localSim is calculated from the definition sentence pair that gives the maximum value of", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 83, |
|
"text": "(d l , d m )", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "f 10", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "ls(p 1 , p 2 , d l , d m )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 10", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ". localSim is local in the sense that it is calculated based on only one definition pair from which (p 1 , p 2 ) are extracted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 10", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Global similarity The global similarity (global-Sim) is our novel similarity function. We decompose a candidate phrase pair (p 1 , p 2 ) into Comm, the common part between p 1 and p 2 , and Diff , the difference between the two. For example, Comm and Diff of (\"keep the meaning intact\", \"preserve the meaning\") is (\"the meaning\") and (\"keep, intact\", \"preserve\"). globalSim measures the semantic similarity of the Diff of a phrase pair. It is proposed based on the following intuition: phrase pair (p 1 , p 2 ) tends to be a paraphrase if their surface difference (i.e. Diff ) has the same meaning. For example, if \"keep, intact\" and \"preserve\" mean the same, then (\"keep the meaning intact\", \"preserve the meaning\") is a paraphrase. globalSim considers the occurrence of Diff in global contexts (i.e., all the paraphrase candidates from all the definition pairs). The globalSim of a given phrase pair (p 1 , p 2 ) is measured by basically counting how many times the Diff of (p 1 , p 2 ) appears in all the candidate phrase pairs from all the definition pairs. The assumption is that Diff tends to share the same meaning if it appears repeatedly in paraphrase candidates from all definition sentence pairs, i.e., our parallel corpus. Each occurrence of Diff is weighted by the localSim of the phrase pair in which Diff occurs. Precisely, globalSim is defined as: Threshold The frequency threshold of Table 2 (Section 2.1.2).",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "f 10", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rules for identifying NPs in sentences (Section 2.1.3).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NP rule", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The list of content words' POS (Section 2.2). Tagger/parser POS taggers, dependency parsers and NER tools. The final score for a candidate phrase pair is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "POS list", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Score(p 1 , p 2 ) = localSim(p 1 , p 2 ) + ln globalSim(p 1 , p 2 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "POS list", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The way of combining the two similarity functions has been determined empirically after testing several other ways of combining them. This ranks all the candidate phrase pairs. Finally, we summarize language-dependent components that we fix manually in Table 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 260, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "POS list", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We show that our unsupervised definition extraction method is competitive with state-of-the-art supervised methods for English , and that it extracts a large number of definitions reasonably accurately for English (3,216,121 definitions with 70% precision), Japanese (651,293 definitions with 62.5% precision), and Chinese (682,661 definitions with 67% precision).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "2 If there is more than one (p i , p j ) in a definition pair, we use only one of them that has the largest localSim value.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "3 Although we claim that our idea of using globalSim is effective, we do not claim that the above formula for calculating is the optimal way to implement the idea. Currently we are investigating a more mathematically well-motivated model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "First we describe Pos, Neg, and the Web corpus from which definition sentences are extracted. As the source of Pos, we used the English Wikipedia of April 2011 (3,620,149 articles), the Japanese Wikipedia of October 2011 (830,417 articles), and the Chinese Wikipedia of August 2011 (365,545 articles). We removed category articles, template articles, list articles and so on from them. Then the number of sentences of Pos was 2,439,257 for English, 703,208 for Japanese, and 310,072 for Chinese. We verified our assumption that Wikipedia first sentences can mostly be seen as definitions by manually checking 200 random samples from Pos. 96.5% of English Pos, 100% of Japanese Pos, and 99.5% of Chinese Pos were definitions.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preparing Corpora", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "As the source of Neg, we used 600 million Japanese Web pages (Akamine et al., 2010) and the ClueWeb09 corpus for English (about 504 million pages) and Chinese (about 177 million pages). 4 From each Web corpus, we collected the sentences satisfying the following conditions: 1) they contain 5 to 50 words and at least one verb, 2) less than half of their words are numbers, and 3) they end with a period. Then we randomly sampled sentences from the collected sentences as Neg so that |Neg| was about twice as large as |Pos|: 5,000,000 for English, 1,400,000 for Japanese, and 600,000 for Chinese.",
|
"cite_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 187, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preparing Corpora", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "In Section 3.1.3, we use 10% of the Web corpus as the input to the definition classifier. The number of sentences is 294,844,141 for English, 245,537,860 for Japanese, and 68,653,130 for Chinese.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preparing Corpora", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "All the sentences were POS-tagged and parsed. We used TreeTagger and MSTParser (McDonald et al., 2006) for English, JUMAN (Kurohashi and Kawahara, 2009a) and KNP (Kurohashi and Kawahara, 2009b) for Japanese, MMA (Kruengkrai et al., 2009) and CNP (Chen et al., 2009) for Chinese.", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 102, |
|
"text": "TreeTagger and MSTParser (McDonald et al., 2006)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 153, |
|
"text": "JUMAN (Kurohashi and Kawahara, 2009a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 158, |
|
"end": 193, |
|
"text": "KNP (Kurohashi and Kawahara, 2009b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 212, |
|
"end": 237, |
|
"text": "(Kruengkrai et al., 2009)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 246, |
|
"end": 265, |
|
"text": "(Chen et al., 2009)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preparing Corpora", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "We compared our method with the state-of-the-art supervised methods proposed by , using their WCL datasets v1.0 (http://lcl.uniroma1.it/wcl/), definition and nondefinition datasets for English . Specifically, we used its training data (T rDat wcl , hereafter), which consisted of 1,908 definition and 2,711 non-definition sentences, and compared the following three methods. WCL-1 and WCL-3 are methods proposed by . They were trained and tested with 10 fold cross validation using T rDat wcl . Proposed def is our method, which used TrDat for acquiring patterns (Section 2.1.2) and training. We tested Proposed def on each of T rDat wcl 's 10 folds and averaged the results. Note that, for Proposed def , we removed sentences in T rDat wcl from TrDat in advance for fairness. Table 5 shows the results. The numbers for WCL-1 and WCL-3 are taken from . Proposed def outperformed both methods in terms of recall, F1, and accuracy. Thus, we conclude that Proposed def is comparable to WCL-1/WCL-3.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 777, |
|
"end": 784, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison with Previous Methods", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "We conducted ablation tests of our method to investigate the effectiveness of each type of pattern. When using only N-grams, F1 was 85.41. When using N-grams and subsequences, F1 was 86.61. When using N-grams and subtrees, F1 was 86.85. When using all the features, F1 was 86.88. The results show that each type of pattern contributes to the performance, but the contributions of subsequence patterns and subtree patterns do not seem very significant.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with Previous Methods", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "We extracted definitions from 10% of the Web corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "We applied Proposed def to the corpus of each language, and the state-of-the-art supervised method for Japanese (Hashimoto et al., 2011 ) (Hashi def , hereafter) to the Japanese corpus. Hashi def was trained on their training data that consisted of 2,911 sentences, 61.1% of which were definitions. Note that we removed sentences in TrDat from 10% of the Web corpus in advance, while we did not remove Hashimoto et al.'s training data from the corpus. This means that, for Hashi def , the training data is included in the test data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 135, |
|
"text": "(Hashimoto et al., 2011", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "For each method, we filtered out its positive outputs whose defined term appeared more than 1,000 times in 10% of the Web corpus, since those terms tend to be too vague to be a defined term or refer to an entity outside the definition sentence. For example, if \"the college\" appears more than 1,000 times in 10% of the corpus, we filter out sentences like \"The college is one of three colleges in the Coast Community College District and was founded in 1947.\" For Proposed def , the number of remaining positive outputs is 3,216,121 for English, 651,293 for Japanese, and 682,661 for Chinese. For Hashi def , the number of positive outputs is 523,882.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "For Proposed def of each language, we randomly sampled 200 sentences from the remaining positive outputs. For Hashi def , we first sorted its output by the SVM score in descending order and then randomly sampled 200 from the top 651,293, i.e., the same number as the remaining positive outputs of Proposed def of Japanese, out of all the remaining sentences of Hashi def .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "For each language, after shuffling all the samples, two human annotators evaluated each sample. The annotators for English and Japanese were not the authors, while one of the Chinese annotators was one of the authors. We regarded a sample as a definition if it was regarded as a definition by both annotators. Cohen's kappa (Cohen, 1960) was 0.55 for English (moderate agreement (Landis and Koch, 1977) ), 0.73 for Japanese (substantial agreement), and 0.69 for Chinese (substantial agreement).", |
|
"cite_spans": [ |
|
{ |
|
"start": 324, |
|
"end": 337, |
|
"text": "(Cohen, 1960)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 402, |
|
"text": "Koch, 1977)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "For English, Proposed def achieved 70% precision for the 200 samples. For Japanese, Proposed def achieved 62.5% precision for the 200 samples, while Hashi def achieved 70% precision for the 200 samples. For Chinese, Proposed def achieved 67% precision for the 200 samples. From these results, we conclude that Proposed def can extract a large number of definition sentences from the Web moderately well for the three languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "Although the precision is not very high, our experiments in the next section show that we can still extract a large number of paraphrases with high precision from these definition sentences, due mainly to our similarity measures, localSim and globalSim.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Definition Extraction", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "We show (1) that our paraphrase extraction method outperforms unsupervised methods for the three languages, (2) that globalSim is effective, and (3) that our method is comparable to the state-of-the-art su- (Koehn et al., 2007) . We assume that Moses should extract a set of two phrases that are paraphrases of each other, if we input monolingual parallel sentence pairs like our definition pairs. We used default values for all the parameters. Outputs are ranked by the product of two phrase translation probabilities of both directions. P&D: The distributional similarity based method by Pa\u015fca and Dienes (2005) (their \"N-gram-Only\" method) . Outputs are ranked by the number of contexts two phrases share. Following Pa\u015fca and Dienes (2005) , we used the parameters LC = 3 and M axP = 4, while M inP , which was 1 in Pa\u015fca and Dienes (2005) , was set to 2 since our target was phrasal paraphrases. pervised method for Japanese.", |
|
"cite_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 227, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 590, |
|
"end": 642, |
|
"text": "Pa\u015fca and Dienes (2005) (their \"N-gram-Only\" method)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 719, |
|
"end": 742, |
|
"text": "Pa\u015fca and Dienes (2005)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 819, |
|
"end": 842, |
|
"text": "Pa\u015fca and Dienes (2005)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments of Paraphrase Extraction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We extracted paraphrases from definition sentences in Pos and those extracted by Proposed def in Section 3.1.3. First we coupled two definition sentences whose defined term was the same. The number of definition pairs was 3,208,086 for English, 742,306 for Japanese, and 457,233 for Chinese.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Then we evaluated six methods in Table 6 . 5 All the methods except P&D took the same definition pairs as input, while P&D's input was 10% of the Web corpus. The input can be seen as the same for all the methods, since the definition pairs were derived from that 10% of the Web corpus. In our experiments Exp1 and Exp2 below, all evaluation samples were shuffled so that human annotators could not know which sample was from which method. Annotators were the same as those who conducted the evaluation in Section 3.1.3. Cohen's kappa (Cohen, 1960) was 0.83 for English, 0.88 for Japanese, and 0.85 for Chinese, all of which indicated reasonably good (Landis and Koch, 1977) . We regarded a candidate phrase pair as a paraphrase if both annotators regarded it as a paraphrase.", |
|
"cite_spans": [ |
|
{ |
|
"start": 662, |
|
"end": 673, |
|
"text": "Koch, 1977)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 40, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "We compared the methods that take definition pairs as input, i.e. Proposed Score , Proposed local , Hashi sup , Hashi uns , and SMT. We randomly sampled 200 phrase pairs from the top 10,000 for each method for evaluation. The evaluation of each candidate phrase pair (p 1 , p 2 ) was based on bidirectional checking of entailment relation, p 1 \u2192 p 2 and p 2 \u2192 p 1 , with p 1 and p 2 embedded in contexts, as Hashimoto et al. (2011) did. Entailment relation of both directions hold if (p 1 , p 2 ) is a paraphrase. We used definition pairs from which candidate phrase pairs were extracted as contexts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 408, |
|
"end": 431, |
|
"text": "Hashimoto et al. (2011)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exp1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We compared Proposed Score and P&D. Since P&D restricted its output to phrase pairs in which each phrase consists of two to four words, we restricted the output of Proposed Score to 2-to-4words phrase pairs, too. We randomly sampled 200 from the top 3,000 phrase pairs from each method for evaluation, and the annotators checked entailment relation of both directions between two phrases using Web sentence pairs that contained the two phrases as contexts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exp2", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "From Exp1, we obtained precision curves in the upper half of Figure 4 . The curves were drawn from the 200 samples that were sorted in descending order by their score, and we plotted a dot for every 5 samples. Proposed Score outperformed Proposed local for the three languages, and thus globalSim was effective. Proposed Score outperformed Hashi sup . However, we observed that Proposed Score acquired many candidate phrase pairs (p 1 , p 2 ) for which p 1 and p 2 consisted of the same content words like \"send a postcard to the author\" and \"send the author a postcard,\" while the other methods tended to acquire more content word variations like \"have a chance\" and \"have an opportunity.\" Then we evaluated all the methods in terms of how many paraphrases with content word variations were extracted. We extracted from the evaluation samples only candidate phrase pairs whose Diff contained a content word (content word variation pairs), to see how many of them were paraphrases. The lower half of Figure 4 shows the results (curves labeled with cwv). The number of samples for Proposed Score reduced drastically compared to the others for English and Japanese, though precision was kept at a high level. It is due mainly to the globalSim; the Diff of the non-content word variation pairs appears frequently in paraphrase candidates, and thus their globalSim scores are high.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 69, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1000, |
|
"end": 1008, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "From Exp2, precision curves in Figure 5 were obtained. P&D acquired more content word variation pairs as the curves labeled by cwv indicates. However, Proposed Score 's precision outperformed P&D's by a large margin for the three languages.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 39, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "From all of these results, we conclude (1) that our paraphrase extraction method outperforms unsupervised methods for the three languages, (2) that glob-alSim is effective, and (3) that our method is comparable to the state-of-the-art supervised method for Japanese, though our method tends to extract fewer content word variation pairs than the others. Table 7 shows examples of English paraphrases extracted by Proposed Score .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 354, |
|
"end": 361, |
|
"text": "Table 7", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "is based in Halifax = is headquartered in Halifax used for treating HIV = used to treat HIV is a rare form = is an uncommon type is a set = is an unordered collection has an important role = plays a key role ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "We proposed a minimally supervised method for multilingual paraphrase extraction. Our experiments showed that our paraphrase extraction method outperforms unsupervised methods (Pa\u015fca and Dienes, 2005; Koehn et al., 2007; Hashimoto et al., 2011) for English, Japanese, and Chinese, and is comparable to the state-of-the-art language dependent supervised method for Japanese (Hashimoto et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 200, |
|
"text": "(Pa\u015fca and Dienes, 2005;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 220, |
|
"text": "Koehn et al., 2007;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 244, |
|
"text": "Hashimoto et al., 2011)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 397, |
|
"text": "(Hashimoto et al., 2011)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "http://lemurproject.org/clueweb09.php/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We filtered out phrase pairs in which one phrase contained a named entity but the other did not contain the named entity from the output of Proposed Score , Proposed local , SMT, and P&D, since most of them were not paraphrases. We used Stanford NER (Finkel et al., 2005) for English named entity recognition (NER), KNP for Japanese NER, and BaseNER (Zhao and Kit, 2008) for Chinese NER. Hashi sup and Hashi uns did the named entity filtering of the same kind (footnote 3 of Hashimoto et al. (2011)), and thus we did not apply the filter to them any further.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Organizing information on the web to support user judgments on information credibility", |
|
"authors": [ |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Susumu Akamine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshikiyo", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tetsuji", |
|
"middle": [], |
|
"last": "Kato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yutaka", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Nakagawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takuya", |
|
"middle": [], |
|
"last": "Leon-Suematsu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Kawada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Inui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yutaka", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kidawara", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of 2010 4th International Universal Communication Symposium Proceedings (IUCS 2010)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "122--129", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Susumu Akamine, Daisuke Kawahara, Yoshikiyo Kato, Tetsuji Nakagawa, Yutaka I. Leon-Suematsu, Takuya Kawada, Kentaro Inui, Sadao Kurohashi, and Yutaka Kidawara. 2010. Organizing information on the web to support user judgments on information credibil- ity. In Proceedings of 2010 4th International Uni- versal Communication Symposium Proceedings (IUCS 2010), pages 122-129.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Learning to paraphrase: An unsupervised approach using multiplesequence alignment", |
|
"authors": [ |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of HLT-NAACL 2003", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "16--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Regina Barzilay and Lillian Lee. 2003. Learning to paraphrase: An unsupervised approach using multiple- sequence alignment. In Proceedings of HLT-NAACL 2003, pages 16-23.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Extracting paraphrases from a parallel corpus", |
|
"authors": [ |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 39th Annual Meeting of the ACL joint with the 10th Meeting of the European Chapter of the ACL (ACL/EACL 2001)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "50--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Regina Barzilay and Kathleen R. McKeown. 2001. Ex- tracting paraphrases from a parallel corpus. In Pro- ceedings of the 39th Annual Meeting of the ACL joint with the 10th Meeting of the European Chapter of the ACL (ACL/EACL 2001), pages 50-57.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Ledir: An unsupervised algorithm for learning directionality of inference rules", |
|
"authors": [ |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Bhagat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Pantel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of Conference on Empirical Methods in Natural Language Processing (EMNLP2007)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--170", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rahul Bhagat, Patrick Pantel, and Eduard Hovy. 2007. Ledir: An unsupervised algorithm for learning direc- tionality of inference rules. In Proceedings of Confer- ence on Empirical Methods in Natural Language Pro- cessing (EMNLP2007), pages 161-170.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Syntactic constraints on paraphrases extracted from parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "196--205", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Callison-Burch. 2008. Syntactic constraints on paraphrases extracted from parallel corpora. In Pro- ceedings of the 2008 Conference on Empirical Meth- ods in Natural Language Processing, pages 196-205.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Improving dependency parsing with subtrees from auto-parsed data", |
|
"authors": [ |
|
{ |
|
"first": "Wenliang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiyotaka", |
|
"middle": [], |
|
"last": "Kazama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Uchimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Torisawa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing, EMNLP '09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "570--579", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenliang Chen, Jun'ichi Kazama, Kiyotaka Uchimoto, and Kentaro Torisawa. 2009. Improving dependency parsing with subtrees from auto-parsed data. In Pro- ceedings of the 2009 Conference on Empirical Meth- ods in Natural Language Processing, EMNLP '09, pages 570-579, Singapore. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Coefficient of agreement for nominal scales", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1960, |
|
"venue": "Educational and Psychological Measurement", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "37--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Cohen. 1960. Coefficient of agreement for nom- inal scales. In Educational and Psychological Mea- surement, pages 37-46.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Unsupervised construction of large paraphrase corpora: exploiting massively parallel news sources", |
|
"authors": [ |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Quirk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 20th international conference on Computational Linguistics (COLING 2004)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "350--356", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bill Dolan, Chris Quirk, and Chris Brockett. 2004. Un- supervised construction of large paraphrase corpora: exploiting massively parallel news sources. In Pro- ceedings of the 20th international conference on Com- putational Linguistics (COLING 2004), pages 350- 356, Geneva, Switzerland, Aug 23-Aug 27.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Efficient mining of emerging patterns: discovering trends and differences", |
|
"authors": [ |
|
{ |
|
"first": "Guozhu", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinyan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of the fifth ACM SIGKDD international conference on Knowledge discovery and data mining, KDD '99", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--52", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guozhu Dong and Jinyan Li. 1999. Efficient mining of emerging patterns: discovering trends and differences. In Proceedings of the fifth ACM SIGKDD international conference on Knowledge discovery and data mining, KDD '99, pages 43-52, San Diego, California, United States.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Incorporating non-local information into information extraction systems by gibbs sampling", |
|
"authors": [ |
|
{ |
|
"first": "Jenny", |
|
"middle": [ |
|
"Rose" |
|
], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trond", |
|
"middle": [], |
|
"last": "Grenager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jenny Rose Finkel, Trond Grenager, and Christopher Manning. 2005. Incorporating non-local information into information extraction systems by gibbs sampling.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Proceedings of the 43nd Annual Meeting of the Association for Computational Linguistics (ACL 2005)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "363--370", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "In Proceedings of the 43nd Annual Meeting of the As- sociation for Computational Linguistics (ACL 2005), pages 363-370.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Enlarging paraphrase collections through generalization and instantiation", |
|
"authors": [ |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Isabelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roland", |
|
"middle": [], |
|
"last": "Kuhn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "631--642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atsushi Fujita, Pierre Isabelle, and Roland Kuhn. 2012. Enlarging paraphrase collections through generaliza- tion and instantiation. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Lan- guage Processing and Computational Natural Lan- guage Learning (EMNLP-CoNLL 2012), pages 631- 642.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "The distributional inclusion hypotheses and lexical entailment", |
|
"authors": [ |
|
{ |
|
"first": "Maayan", |
|
"middle": [], |
|
"last": "Geffet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL 2005)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "107--114", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maayan Geffet and Ido Dagan. 2005. The distributional inclusion hypotheses and lexical entailment. In Pro- ceedings of the 43rd Annual Meeting of the Associa- tion for Computational Linguistics (ACL 2005), pages 107-114.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Large-scale verb entailment acquisition from the web", |
|
"authors": [ |
|
{ |
|
"first": "Chikara", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Torisawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kow", |
|
"middle": [], |
|
"last": "Kuroda", |
|
"suffix": "" |
|
}, |
|
{

"first": "Stijn",

"middle": [],

"last": "De Saeger",

"suffix": ""

},
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Murata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kazama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1172--1181", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chikara Hashimoto, Kentaro Torisawa, Kow Kuroda, Stijn De Saeger, Masaki Murata, and Jun'ichi Kazama. 2009. Large-scale verb entailment acquisition from the web. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing (EMNLP 2009), pages 1172-1181.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Extracting paraphrases from definition sentences on the web", |
|
"authors": [ |
|
{ |
|
"first": "Chikara", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Torisawa", |
|
"suffix": "" |
|
}, |
|
{

"first": "Stijn",

"middle": [],

"last": "De Saeger",

"suffix": ""

},
|
{

"first": "Jun'ichi",

"middle": [],

"last": "Kazama",

"suffix": ""

},
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1087--1097", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chikara Hashimoto, Kentaro Torisawa, Stijn De Saeger, Jun'ichi Kazama, and Sadao Kurohashi. 2011. Ex- tracting paraphrases from definition sentences on the web. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Hu- man Language Technologies, pages 1087-1097, Port- land, Oregon, USA, June. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Exploiting Wikipedia as external knowledge for named entity recognition", |
|
"authors": [ |
|
{

"first": "Jun'ichi",

"middle": [],

"last": "Kazama",

"suffix": ""

},
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Torisawa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "698--707", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jun'ichi Kazama and Kentaro Torisawa. 2007. Exploit- ing Wikipedia as external knowledge for named entity recognition. In Proceedings of the 2007 Joint Confer- ence on Empirical Methods in Natural Language Pro- cessing and Computational Natural Language Learn- ing (EMNLP-CoNLL), pages 698-707, Prague, Czech Republic, June. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ond\u0159ej Bojar, Alexandra Con- stantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceed- ings of the 45th Annual Meeting of the Association for Computational Linguistics Companion Volume Pro- ceedings of the Demo and Poster Sessions, pages 177- 180, Prague, Czech Republic, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "An error-driven word-character hybrid model for joint chinese word segmentation and pos tagging", |
|
"authors": [ |
|
{ |
|
"first": "Canasai", |
|
"middle": [], |
|
"last": "Kruengkrai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiyotaka", |
|
"middle": [], |
|
"last": "Uchimoto", |
|
"suffix": "" |
|
}, |
|
{

"first": "Jun'ichi",

"middle": [],

"last": "Kazama",

"suffix": ""

},
|
{ |
|
"first": "Kentaro", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hitoshi", |
|
"middle": [], |
|
"last": "Torisawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Isahara", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "513--521", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Canasai Kruengkrai, Kiyotaka Uchimoto, Jun'ichi Kazama, Yiou Wang, Kentaro Torisawa, and Hitoshi Isahara. 2009. An error-driven word-character hybrid model for joint chinese word segmentation and pos tagging. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th Interna- tional Joint Conference on Natural Language Process- ing of the AFNLP, pages 513-521, Suntec, Singapore, August. Association for Computational Linguistics. Sadao Kurohashi and Daisuke Kawahara. 2009a. Japanese morphological analyzer system ju- man version 6.0 (in japanese). Kyoto University, http://nlp.ist.i.kyoto-u.ac.jp/EN/index.php?JUMAN. Sadao Kurohashi and Daisuke Kawahara. 2009b.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Japanese syntax and case analyzer knp version 3.0 (in japanese)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Japanese syntax and case analyzer knp version 3.0 (in japanese). Kyoto University, http://nlp.ist.i.kyoto- u.ac.jp/EN/index.php?KNP.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Measurement of observer agreement for categorical data", |
|
"authors": [ |
|
{

"first": "J",

"middle": [

"Richard"

],

"last": "Landis",

"suffix": ""

},
|
{ |
|
"first": "Gary", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Koch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "Biometrics", |
|
"volume": "33", |
|
"issue": "1", |
|
"pages": "159--174", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Richard Landis and Gary G. Koch. 1977. Measure- ment of observer agreement for categorical data. Bio- metrics, 33(1):159-174.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Discovery of inference rules for question answering", |
|
"authors": [ |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Pantel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Natural Language Engineering", |
|
"volume": "7", |
|
"issue": "4", |
|
"pages": "343--360", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekang Lin and Patrick Pantel. 2001. Discovery of infer- ence rules for question answering. Natural Language Engineering, 7(4):343-360.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Identifying synonyms among distributionally similar words", |
|
"authors": [ |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaojun", |
|
"middle": [], |
|
"last": "Zhao Lijuan Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 18th International Joint Conference on Artificial Intelligence (IJCAI-03)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1492--1493", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekang Lin, Shaojun Zhao Lijuan Qin, and Ming Zhou. 2003. Identifying synonyms among distributionally similar words. In Proceedings of the 18th Inter- national Joint Conference on Artificial Intelligence (IJCAI-03), pages 1492-1493.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Multilingual dependency analysis with a twostage discriminative parser", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Lerman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Tenth Conference on Computational Natural Language Learning, CoNLL-X '06", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "216--220", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan McDonald, Kevin Lerman, and Fernando Pereira. 2006. Multilingual dependency analysis with a two- stage discriminative parser. In Proceedings of the Tenth Conference on Computational Natural Lan- guage Learning, CoNLL-X '06, pages 216-220, New York City, New York.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Learning word-class lattices for definition and hypernym extraction", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paola", |
|
"middle": [], |
|
"last": "Velardi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1318--1327", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli and Paola Velardi. 2010. Learning word-class lattices for definition and hypernym extrac- tion. In Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, pages 1318-1327, Uppsala, Sweden, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "An annotated dataset for extracting definitions and hypernyms from the web", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paola", |
|
"middle": [], |
|
"last": "Velardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juana", |
|
"middle": [ |
|
"Mar\u00eda" |
|
], |
|
"last": "Ruiz-Mart\u00ednez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of LREC 2010", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3716--3722", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli, Paola Velardi, and Juana Mar\u00eda Ruiz- Mart\u00ednez. 2010. An annotated dataset for extracting definitions and hypernyms from the web. In Proceed- ings of LREC 2010, pages 3716-3722.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Aligning needles in a haystack: paraphrase acquisition across the web", |
|
"authors": [ |
|
{ |
|
"first": "Marius", |
|
"middle": [], |
|
"last": "Pa\u015fca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P\u00e9ter", |
|
"middle": [], |
|
"last": "Dienes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the Second international joint conference on Natural Language Processing, IJCNLP'05", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "119--130", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marius Pa\u015fca and P\u00e9ter Dienes. 2005. Aligning needles in a haystack: paraphrase acquisition across the web. In Proceedings of the Second international joint con- ference on Natural Language Processing, IJCNLP'05, pages 119-130, Jeju Island, Korea.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Improvements in part-of-speech tagging with an application to german", |
|
"authors": [ |
|
{ |
|
"first": "Helmut", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of the ACL SIGDAT-Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "47--50", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helmut Schmid. 1995. Improvements in part-of-speech tagging with an application to german. In Proceedings of the ACL SIGDAT-Workshop, pages 47-50.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Automatic paraphrase acquisition from news articles", |
|
"authors": [ |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Shinyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Sekine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiyoshi", |
|
"middle": [], |
|
"last": "Sudo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 2nd international Conference on Human Language Technology Research (HLT2002)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "313--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yusuke Shinyama, Satoshi Sekine, and Kiyoshi Sudo. 2002. Automatic paraphrase acquisition from news ar- ticles. In Proceedings of the 2nd international Con- ference on Human Language Technology Research (HLT2002), pages 313-318.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Learning entailment rules for unary template", |
|
"authors": [ |
|
{ |
|
"first": "Idan", |
|
"middle": [], |
|
"last": "Szpektor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 22nd International Conference on Computational Linguistics (COLING2008)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "849--856", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Idan Szpektor and Ido Dagan. 2008. Learning entail- ment rules for unary template. In Proceedings of the 22nd International Conference on Computational Lin- guistics (COLING2008), pages 849-856.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Unsupervised segmentation helps supervised learning of character tagging for word segmentation and named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunyu", |
|
"middle": [], |
|
"last": "Kit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Sixth SIGHAN Workshop on Chinese Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "106--111", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Zhao and Chunyu Kit. 2008. Unsupervised seg- mentation helps supervised learning of character tag- ging for word segmentation and named entity recog- nition. In Proceedings of the Sixth SIGHAN Workshop on Chinese Language Processing, pages 106-111, Hy- derabad, India.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Multilingual definition pairs on \"paraphrasing.\"", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Overall picture of our method.", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"text": "Web sentence: Albert Pujols is a baseball player. Term-marked sentence 1: [term] is a baseball player. Term-marked sentence 2: Albert Pujols is a [term].", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"text": "Term-marked sentences from a Web sentence.", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"text": "Precision curves of Exp2: English (A), Chinese (B), and Japanese (C).", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"text": "Examples of English patterns.", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>tence and mark it up as a (false) defined term. Any</td></tr><tr><td>marked term is uniformly replaced with [term].</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"text": "Values of frequency threshold.", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"text": "", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF5": { |
|
"text": "Language-dependent components. ) is the set of candidate phrase pairs whose Diff is the same as (p 1 , p 2 ). 2 M is the number of similarity subfunction types whose weight is 1, i.e. M = 8 (all the subfunction types except f 4 and f 5 ). It is used to normalize the value of each occurrence of Diff to [0, 1]. 3 globalSim is global in the sense that it considers all the definition pairs that have a phrase pair with the same Diff as a target candidate phrase pair (p 1 , p 2 ).", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td>globalSim(p 1 , p 2 ) =</td><td>(pi,pj )\u2208P P (p1,p2)</td><td>localSim(p i , p j ) M</td><td>.</td></tr><tr><td>P P (p 1 , p 2</td><td/><td/><td/></tr></table>" |
|
}, |
|
"TABREF7": { |
|
"text": "Definition classification results on T rDat wcl .", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF8": { |
|
"text": "Proposed Score : Our method. Outputs are ranked by Score. Proposed local : This is the same as Proposed Score except that it ranks outputs by localSim. The performance drop from Proposed Score shows globalSim's effectiveness. Hashi sup : Hashimoto et al.'s supervised method. Training data is the same as Hashimoto et al. Outputs are ranked by the SVM score (the distance from the hyperplane). This is for Japanese only. Hashi uns : The unsupervised version of Hashi sup . Outputs are ranked by the sum of feature values. Japanese only. SMT: The phrase table construction method of Moses", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF9": { |
|
"text": "Evaluated paraphrase extraction methods.", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF10": { |
|
"text": "Examples of extracted English paraphrases.", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |