|
{ |
|
"paper_id": "S12-1034", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:24:03.174608Z" |
|
}, |
|
"title": "Monolingual Distributional Similarity for Text-to-Text Generation", |
|
"authors": [ |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University Baltimore", |
|
"location": { |
|
"postCode": "21218", |
|
"region": "MD", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [ |
|
"Van" |
|
], |
|
"last": "Durme", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University Baltimore", |
|
"location": { |
|
"postCode": "21218", |
|
"region": "MD", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Johns Hopkins University Baltimore", |
|
"location": { |
|
"postCode": "21218", |
|
"region": "MD", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Previous work on paraphrase extraction and application has relied on either parallel datasets, or on distributional similarity metrics over large text corpora. Our approach combines these two orthogonal sources of information and directly integrates them into our paraphrasing system's log-linear model. We compare different distributional similarity feature-sets and show significant improvements in grammaticality and meaning retention on the example text-to-text generation task of sentence compression, achieving stateof-the-art quality.", |
|
"pdf_parse": { |
|
"paper_id": "S12-1034", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Previous work on paraphrase extraction and application has relied on either parallel datasets, or on distributional similarity metrics over large text corpora. Our approach combines these two orthogonal sources of information and directly integrates them into our paraphrasing system's log-linear model. We compare different distributional similarity feature-sets and show significant improvements in grammaticality and meaning retention on the example text-to-text generation task of sentence compression, achieving stateof-the-art quality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "A wide variety of applications in natural language processing can be cast in terms of text-to-text generation. Given input in the form of natural language, a text-to-text generation system produces natural language output that is subject to a set of constraints. Compression systems, for instance, produce shorter sentences. Paraphrases, i.e. differing textual realizations of the same meaning, are a crucial components of text-to-text generation systems, and have been successfully applied to tasks such as multi-document summarization (Barzilay et al., 1999; Barzilay, 2003) , query expansion (Anick and Tipirneni, 1999; Riezler et al., 2007) , question answering (McKeown, 1979; Ravichandran and Hovy, 2002) , sentence compression (Cohn and Lapata, 2008; Zhao et al., 2009) , and simplification (Wubben et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 537, |
|
"end": 560, |
|
"text": "(Barzilay et al., 1999;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 561, |
|
"end": 576, |
|
"text": "Barzilay, 2003)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 622, |
|
"text": "(Anick and Tipirneni, 1999;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 623, |
|
"end": 644, |
|
"text": "Riezler et al., 2007)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 666, |
|
"end": 681, |
|
"text": "(McKeown, 1979;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 682, |
|
"end": 710, |
|
"text": "Ravichandran and Hovy, 2002)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 734, |
|
"end": 757, |
|
"text": "(Cohn and Lapata, 2008;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 758, |
|
"end": 776, |
|
"text": "Zhao et al., 2009)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 798, |
|
"end": 819, |
|
"text": "(Wubben et al., 2012)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Paraphrase collections for text-to-text generation have been extracted from a variety of different corpora. Several approaches rely on bilingual paral-lel data (Bannard and Callison-Burch, 2005; Zhao et al., 2008; Callison-Burch, 2008; , while others leverage distributional methods on monolingual text corpora (Lin and Pantel, 2001; Bhagat and Ravichandran, 2008) . So far, however, only preliminary studies have been undertaken to combine the information from these two sources (Chan et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 194, |
|
"text": "(Bannard and Callison-Burch, 2005;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 195, |
|
"end": 213, |
|
"text": "Zhao et al., 2008;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 235, |
|
"text": "Callison-Burch, 2008;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 333, |
|
"text": "(Lin and Pantel, 2001;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 364, |
|
"text": "Bhagat and Ravichandran, 2008)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 499, |
|
"text": "(Chan et al., 2011)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we describe an extension of Ganitkevitch et al. 2011's bilingual data-based approach. We augment the bilingually-sourced paraphrases using features based on monolingual distributional similarity. More specifically:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We show that using monolingual distributional similarity features improves paraphrase quality beyond what we can achieve with features estimated from bilingual data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We define distributional similarity for paraphrase patterns that contain constituent-level gaps, e.g. sim(one JJ instance of NP , a JJ case of NP ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This generalizes over distributional similarity for contiguous phrases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We compare different types of monolingual distributional information and show that they can be used to achieve significant improvements in grammaticality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Finally, we compare our method to several strong baselines on the text-to-text generation task of sentence compression. Our method shows state-of-the-art results, beating a purely bilingually sourced paraphrasing system. 1: Pivot-based paraphrase extraction for contiguous phrases. Two phrases translating to the same phrase in the foreign language are assumed to be paraphrases of one another.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Approaches to paraphrase extraction differ based on their underlying data source. In Section 2.1 we outline pivot-based paraphrase extraction from bilingual data, while the contextual features used to determine closeness in meaning in monolingual approaches is described in Section 2.2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Following Ganitkevitch et al. 2011, we formulate our paraphrases as a syntactically annotated synchronous context-free grammar (SCFG) (Aho and Ullman, 1972; Chiang, 2005 ). An SCFG rule has the form:", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 156, |
|
"text": "(Aho and Ullman, 1972;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 169, |
|
"text": "Chiang, 2005", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "r = C \u2192 f, e, \u223c, \u03d5 ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where the left-hand side of the rule, C, is a nonterminal and the right-hand sides f and e are strings of terminal and nonterminal symbols. There is a one-to-one correspondency between the nonterminals in f and e: each nonterminal symbol in f has to also appear in e. The function \u223c captures this bijective mapping between the nonterminals. Drawing on machine translation terminology, we refer to f as the source and e as the target side of the rule. Each rule is annotated with a feature vector of feature functions \u03d5 = {\u03d5 1 ...\u03d5 N } that, using a corresponding weight vector \u03bb, are combined in a loglinear model to compute the cost of applying r:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "cost(r) = \u2212 N i=1 \u03bb i log \u03d5 i .", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
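To make Equation (1) concrete, here is a minimal sketch (ours, not from the paper; the feature names and values are purely illustrative) of computing a rule's cost from its feature and weight vectors:

```python
import math

def rule_cost(phi, lam):
    """cost(r) = -sum_i lambda_i * log(phi_i), cf. Equation (1)."""
    return -sum(lam[name] * math.log(value) for name, value in phi.items())

# Hypothetical feature values for one paraphrase rule.
phi = {"p(e2|e1)": 0.20, "p(e1|e2)": 0.15, "sim_ngram": 0.80}
lam = {"p(e2|e1)": 1.0, "p(e1|e2)": 1.0, "sim_ngram": 0.5}

print(rule_cost(phi, lam))  # lower cost = better rule under this model
```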
|
{ |
|
"text": "A wide variety of feature functions can be formulated. We detail the feature-set used in our experiments in Section 4. Figure 2 : Extraction of syntactic paraphrases via the pivoting approach: We aggregate over different surface realizations, matching the lexicalized portions of the rule and generalizing over the nonterminals.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 127, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "To extract paraphrases we follow the intuition that two English strings e 1 and e 2 that translate to the same foreign string f can be assumed to have the same meaning, as illustrated in Figure 1 . 1 First, we use standard machine translation methods to extract a foreign-to-English translation grammar from a bilingual parallel corpus (Koehn, 2010) . Then, for each pair of translation rules where the left-hand side C and foreign string f match:", |
|
"cite_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 199, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 349, |
|
"text": "(Koehn, 2010)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 195, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "r 1 = C \u2192 f, e 1 , \u223c 1 , \u03d5 1 r 2 = C \u2192 f, e 2 , \u223c 2 , \u03d5 2 ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "we pivot over f to create a paraphrase rule r p :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "r p = C \u2192 e 1 , e 2 , \u223c p , \u03d5 p ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "with a combined nonterminal correspondency function \u223c p . Note that the common source side f implies that e 1 and e 2 share the same set of nonterminal symbols.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The paraphrase feature vector \u03d5 p is computed from the translation feature vectors \u03d5 1 and \u03d5 2 by following the pivoting idea. For instance, we estimate the conditional paraphrase probability p(e 2 |e 1 ) by marginalizing over all shared foreign-language translations f : After the SCFG has been extracted, it can be used within standard machine translation machinery, such as the Joshua decoder (Ganitkevitch et al., 2012) . Figure 3 shows an example for a synchronous paraphrastic derivation produced as a result of applying our paraphrase grammar in the decoding process.", |
|
"cite_spans": [ |
|
{ |
|
"start": 396, |
|
"end": 423, |
|
"text": "(Ganitkevitch et al., 2012)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 426, |
|
"end": 434, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(e 2 |e 1 ) = f p(e 2 , f |e 1 ) (2) = f p(e 2 |f, e 1 )p(f |e 1 ) (3) \u2248 f p(e 2 |f )p(f |e 1 ).", |
|
"eq_num": "(4" |
|
} |
|
], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
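As a toy illustration of the marginalization in Equations (2)-(4) (this is not the actual extraction pipeline; the translation tables below are made up):

```python
from collections import defaultdict

def pivot_probs(p_f_given_e, p_e_given_f):
    """Approximate p(e2|e1) ~= sum_f p(e2|f) * p(f|e1) by pivoting
    over shared foreign translations f (Equation 4)."""
    p_para = defaultdict(float)
    for e1, f_probs in p_f_given_e.items():
        for f, p_f_e1 in f_probs.items():
            for e2, p_e2_f in p_e_given_f.get(f, {}).items():
                p_para[e1, e2] += p_e2_f * p_f_e1
    return p_para

# Hypothetical translation tables for a single English phrase.
p_f_given_e = {"thrown into jail": {"festgenommen": 0.6, "inhaftiert": 0.4}}
p_e_given_f = {"festgenommen": {"arrested": 0.7, "thrown into jail": 0.3},
               "inhaftiert": {"imprisoned": 0.8, "thrown into jail": 0.2}}

print(pivot_probs(p_f_given_e, p_e_given_f)["thrown into jail", "arrested"])  # 0.42
```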
|
{ |
|
"text": "The approach outlined relies on aligned bilingual texts to identify phrases and patterns that are equivalent in meaning. When extracting paraphrases from monolingual text, we have to rely on an entirely different set of semantic cues and features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Paraphrase Extraction via Pivoting", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Methods based on monolingual text corpora measure the similarity of phrases based on contextual features. To describe a phrase e, we define a set of features that capture the context of an occurrence of e in our corpus. Writing the context vector for the i-th occurrence of e as s e,i , we can aggregate over all occurrences of e, resulting in a distributional signature for e, s e = i s e,i . Following the intuition that phrases with similar meanings occur in similar contexts, we can then quantify the goodness of e as a paraphrase of e by computing the cosine similarity between their distributional signatures:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monolingual Distributional Similarity", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "sim(e, e ) = s e \u2022 s e | s e || s e | .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monolingual Distributional Similarity", |
|
"sec_num": "2.2" |
|
}, |
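A minimal sketch of signature aggregation and the cosine similarity above, with sparse context vectors represented as Python dicts (corpus processing is elided; the feature names are illustrative):

```python
import math
from collections import Counter

def signature(occurrence_vectors):
    """Aggregate per-occurrence context vectors s_{e,i} into s_e = sum_i s_{e,i}."""
    sig = Counter()
    for vec in occurrence_vectors:
        sig.update(vec)
    return sig

def cosine(s1, s2):
    """Cosine similarity between two sparse signatures."""
    dot = sum(v * s2.get(k, 0.0) for k, v in s1.items())
    n1 = math.sqrt(sum(v * v for v in s1.values()))
    n2 = math.sqrt(sum(v * v for v in s2.values()))
    return dot / (n1 * n2) if n1 and n2 else 0.0

s_e = signature([{"left:revise": 1, "right:plans": 2}, {"right:plans": 1}])
s_e2 = signature([{"left:revise": 1, "right:plans": 2, "right:goals": 1}])
print(cosine(s_e, s_e2))
```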
|
{ |
|
"text": "A wide variety of features have been used to describe the distributional context of a phrase. Rich, linguistically informed feature-sets that rely on dependency and constituency parses, part-of-speech tags, or lemmatization have been proposed in widely known work such as by Church and Hanks (1991) and Lin and Pantel (2001) . For instance, a phrase is described by the various syntactic relations it has with lexical items in its context, such as: \"for what verbs do we see with the phrase as the subject?\", or \"what adjectives modify the phrase?\". However, when moving to vast text collections or collapsed representations of large text corpora, linguistic annotations can become impractically expensive to produce. A straightforward and widely used solution is to fall back onto lexical n-gram features, e.g. \"what words or bigrams have we seen to the left of this phrase?\" A substantial body of work has focussed on using this type of feature-set for a variety of purposes in NLP (Lapata and Keller, 2005; Bhagat and Ravichandran, 2008; Lin et al., 2010; Van Durme and Lall, 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 275, |
|
"end": 298, |
|
"text": "Church and Hanks (1991)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 324, |
|
"text": "Lin and Pantel (2001)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 984, |
|
"end": 1009, |
|
"text": "(Lapata and Keller, 2005;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1010, |
|
"end": 1040, |
|
"text": "Bhagat and Ravichandran, 2008;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1041, |
|
"end": 1058, |
|
"text": "Lin et al., 2010;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1059, |
|
"end": 1084, |
|
"text": "Van Durme and Lall, 2010)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monolingual Distributional Similarity", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Recently, Chan et al. 2011presented an initial investigation into combining phrasal paraphrases obtained through bilingual pivoting with monolingual distributional information. Their work investigated a reranking approach and evaluated their method via a substitution task, showing that the two sources of information are complementary and can yield improvements in paraphrase quality when combined.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other Related Work", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In order to incorporate distributional similarity information into the paraphrasing system, we need to calculate similarity scores for the paraphrastic SCFG rules in our grammar. For rules with purely lexical right-hand sides e 1 and e 2 this is a simple task, and the similarity score sim(e 1 , e 2 ) can be directly included in the rule's feature vector \u03d5. However, if e 1 and e 2 are long, their occurrences become sparse and their similarity can no longer be reliably estimated. In our case, the right-hand sides of our rules often contain gaps and computing a similarity score is less straightforward. Figure 4 shows an example of such a discontinuous rule and illustrates our solution: we decompose the discontinuous patterns that make up the right-hand sides of a rule r into pairs of contiguous phrases P(r) = { e, e }, for which we can look up distributional signatures and compute similarity scores. This decomposition into phrases is nontrivial, since our sentential paraphrase rules often involve significant reordering or structural changes.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 607, |
|
"end": 615, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Incorporating Distributional Similarity", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To avoid comparing unrelated phrase pairs, we require P(r) to be consistent with a token alignment a. The alignment is defined analogously to word alignments in machine translation, and computed by treating the source and target sides of our paraphrase rules as a parallel corpus. We define the overall similarity score of the rule to be the average of the similarity scores of all extracted phrase pairs:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorporating Distributional Similarity", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "sim(r, a) = 1 |P(a)|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorporating Distributional Similarity", |
|
"sec_num": "3" |
|
}, |
|
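The rule-level score above is just an average over the extracted phrase pairs; a sketch (the decomposition and the phrase-level similarity function are assumed given):

```python
def rule_similarity(phrase_pairs, sim):
    """sim(r, a): mean phrase-level similarity over all pairs <e, e'>
    extracted consistently with the token alignment a."""
    if not phrase_pairs:
        return 0.0
    return sum(sim(e, e2) for e, e2 in phrase_pairs) / len(phrase_pairs)

# Hypothetical decomposition of a discontinuous rule into contiguous pairs.
pairs = [("one", "a"), ("instance of", "case of")]
print(rule_similarity(pairs, lambda a, b: 0.9 if a == b else 0.5))
```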
|
{ |
|
"text": "Since the distributional signatures for long, rare phrases may be computed from only a handful of occurrences, we additionally query for the shorter sub-phrases that are more likely to have been observed often enough to have reliable signatures and thus similarity estimates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorporating Distributional Similarity", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our definition of the similarity of two discontinuous phrases substantially differs from others in the literature. This difference is due to a difference in motivation. Lin and Pantel (2001) , for instance, seek to find new paraphrase pairs by comparing their arguments. In this work, however, we try to add orthogonal information to existing paraphrase pairs. Both our definition of pattern similarity and our feature-set (see Section 4.3) are therefore geared towards comparing the substitutability and context similarity of a pair of paraphrases.", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 190, |
|
"text": "Lin and Pantel (2001)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorporating Distributional Similarity", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our two similarity scores are incorporated into the paraphraser as additional rule features in \u03d5, sim ngram and sim syn , respectively. We estimate the corresponding weights along with the other \u03bb i as detailed in Section 4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Incorporating Distributional Similarity", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To evaluate our method on a real text-to-text application, we use the sentence compression task. To tune the parameters of our paraphrase system for sentence compression, we need an appropriate corpus of reference compressions. Since our model is designed to compress by paraphrasing rather than deletion, the commonly used deletion-based compression data sets like the Ziff-Davis corpus are not suitable. We thus use the dataset introduced in our previous work .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task: Sentence Compression", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Beginning with 9570 tuples of parallel English-English sentences obtained from multiple reference translations for machine translation evaluation, we construct a parallel compression corpus by selecting the longest reference in each tuple as the source sentence and the shortest reference as the target sentence. We further retain only those sentence pairs where the compression ratio cr falls in the range 0.5 < cr \u2264 0.8. From these, we select 936 sentences for the development set, as well as 560 sentences for a test set that we use to gauge the performance of our system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task: Sentence Compression", |
|
"sec_num": "4.1" |
|
}, |
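A sketch of this corpus construction, assuming each tuple is a list of reference translations of the same sentence (whitespace tokenization is an assumption):

```python
def build_compression_pairs(reference_tuples, lo=0.5, hi=0.8):
    """Pair the longest reference (source) with the shortest (target),
    keeping pairs whose compression ratio cr satisfies lo < cr <= hi."""
    pairs = []
    for refs in reference_tuples:
        source = max(refs, key=lambda s: len(s.split()))
        target = min(refs, key=lambda s: len(s.split()))
        cr = len(target.split()) / len(source.split())
        if lo < cr <= hi:
            pairs.append((source, target))
    return pairs

refs = [["we should ponder it and decide our path and follow it , thanks .",
         "now we think and decide on our way . thanks ."]]
print(build_compression_pairs(refs))  # cr = 11/14, kept
```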
|
{ |
|
"text": "We contrast our distributional similarity-informed paraphrase system with a pivoting-only baseline, as well as an implementation of Clarke and Lapata (2008) 's state-of-the-art compression model which uses a series of constraints in an integer linear programming (ILP) solver.", |
|
"cite_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 156, |
|
"text": "Clarke and Lapata (2008)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task: Sentence Compression", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We extract our paraphrase grammar from the French-English portion of the Europarl corpus (version 5) (Koehn, 2005) . The Berkeley aligner (Liang et al., 2006) and the Berkeley parser (Petrov and Klein, 2007) are used to align the bitext and parse the English side, respectively. The paraphrase grammar is produced using the Hadoop-based Thrax Figure 5 : An example of the n-gram feature extraction on an n-gram corpus. Here, \"the long-term\" is seen preceded by \"revise\" (43 times) and followed by \"plans\" (97 times). The corresponding left-and right-side features are added to the phrase signature with the counts of the n-grams that gave rise to them. grammar extractor's paraphrase mode (Ganitkevitch et al., 2012) . The syntactic nonterminal labels we allowed in the grammar were limited to constituent labels and CCG-style slashed categories. Paraphrase grammars extracted via pivoting tend to grow very large. To keep the grammar size manageable, we pruned away all paraphrase rules whose phrasal paraphrase probabilities p(e 1 |e 2 ) or p(e 2 |e 1 ) were smaller than 0.001.", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 114, |
|
"text": "(Koehn, 2005)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 158, |
|
"text": "(Liang et al., 2006)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 207, |
|
"text": "(Petrov and Klein, 2007)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 716, |
|
"text": "(Ganitkevitch et al., 2012)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 343, |
|
"end": 351, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baseline Paraphrase Grammar", |
|
"sec_num": "4.2" |
|
}, |
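The pruning step described above is a simple threshold filter over the two pivoted phrasal probabilities; a sketch (the rule representation is hypothetical):

```python
def prune_grammar(rules, threshold=0.001):
    """Keep only rules whose phrasal paraphrase probabilities
    p(e1|e2) and p(e2|e1) both reach the threshold."""
    return [r for r in rules
            if r["p_e1_given_e2"] >= threshold and r["p_e2_given_e1"] >= threshold]

rules = [{"lhs": "NP", "p_e1_given_e2": 0.010, "p_e2_given_e1": 0.0004},
         {"lhs": "NP", "p_e1_given_e2": 0.050, "p_e2_given_e1": 0.0200}]
print(len(prune_grammar(rules)))  # 1
```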
|
{ |
|
"text": "We extend the feature-set used in Ganitkevitch et al. (2011) with a number of features that aim to better describe a rule's compressive power: on top of the word count features wcount src and wcount tgt and the word count difference feature wcount diff , we add character based count and difference features ccount src , ccount tgt , and ccount diff , as well as logcompression ratio features word cr = log wcounttgt wcountsrc and the analogously defined char cr = log ccounttgt ccountsrc . For model tuning and decoding we used the Joshua machine translation system (Ganitkevitch et al., 2012) . The model weights are estimated using an implementation of the PRO tuning algorithm (Hopkins and May, 2011), with PR\u00c9CIS as our objective function ). The language model used in our paraphraser and the Clarke and Lapata (2008) baseline system is a Kneser-Ney discounted 5-gram model estimated on the Gigaword corpus using the SRILM toolkit (Stolcke, 2002) . Figure 6 : An example of the syntactic featureset. The phrase \"the long-term\" is annotated with position-aware lexical and part-of-speech n-gram features (e.g. \"on to\" on the left, and \"investment\" and \"NN\" to its right), labeled dependency links (e.g. amod \u2212 investment) and features derived from the phrase's CCG label NP /NN .", |
|
"cite_spans": [ |
|
{ |
|
"start": 567, |
|
"end": 594, |
|
"text": "(Ganitkevitch et al., 2012)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 798, |
|
"end": 822, |
|
"text": "Clarke and Lapata (2008)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 936, |
|
"end": 951, |
|
"text": "(Stolcke, 2002)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 954, |
|
"end": 962, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baseline Paraphrase Grammar", |
|
"sec_num": "4.2" |
|
}, |
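The compressive-power features reduce to simple counts and log-ratios; a sketch (whitespace tokenization assumed):

```python
import math

def compression_features(src, tgt):
    """Word/character counts, their differences, and log compression ratios."""
    wc_src, wc_tgt = len(src.split()), len(tgt.split())
    cc_src, cc_tgt = len(src), len(tgt)
    return {
        "wcount_src": wc_src, "wcount_tgt": wc_tgt, "wcount_diff": wc_tgt - wc_src,
        "ccount_src": cc_src, "ccount_tgt": cc_tgt, "ccount_diff": cc_tgt - cc_src,
        "word_cr": math.log(wc_tgt / wc_src),   # log(wcount_tgt / wcount_src)
        "char_cr": math.log(cc_tgt / cc_src),   # log(ccount_tgt / ccount_src)
    }

print(compression_features("thrown into jail", "jailed"))
```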
|
{ |
|
"text": "\u2318 = sig syntax \u21e3 dep-det-R-investment pos-L-TO pos-R-NN lex-R-investment lex-L-to dep-amod-R-investment syn-gov-NP syn-miss-L-NN lex-L-on-to pos-L-IN-TO dep-det-R-NN dep-amod-R-NN", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Paraphrase Grammar", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "To investigate the impact of the feature-set used to construct distributional signatures, we contrast two approaches: a high-coverage collection of distributional signatures with a relatively simple feature-set, and a much smaller set of signatures with a rich, syntactically informed feature-set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distributional Similarity Model", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The high-coverage model (from here on: n-gram model) is drawn from a web-scale n-gram corpus (Brants and Franz, 2006; Lin et al., 2010) . We extract signatures for phrases up to a length of 4. For each phrase p we look at n-grams of the form wp and pv, where w and v are single words. We then extract the corresponding features w left and v right . The feature count is set to the count of the n-gram, reflecting the frequency with which p was preceded or followed, respectively, by w and v in the data the n-gram corpus is based on. Figure 5 illustrates this feature extraction approach. The resulting collection comprises distributional signatures for the 200 million most frequent 1-to-4-grams in the n-gram corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 117, |
|
"text": "(Brants and Franz, 2006;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 135, |
|
"text": "Lin et al., 2010)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 534, |
|
"end": 542, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "n-gram Model", |
|
"sec_num": "4.3.1" |
|
}, |
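A sketch of the n-gram feature extraction illustrated in Figure 5, assuming an iterable of (n-gram, count) pairs from the n-gram corpus:

```python
from collections import Counter

def ngram_signature(phrase, ngram_counts):
    """Collect left/right single-word context features for `phrase`,
    weighted by the counts of the n-grams they come from."""
    sig = Counter()
    p = phrase.split()
    for ngram, count in ngram_counts:
        tokens = ngram.split()
        if tokens[1:] == p:      # n-gram of the form "w p"
            sig["left:" + tokens[0]] += count
        if tokens[:-1] == p:     # n-gram of the form "p v"
            sig["right:" + tokens[-1]] += count
    return sig

counts = [("revise the long-term", 43), ("the long-term plans", 97)]
print(ngram_signature("the long-term", counts))
# Counter({'right:plans': 97, 'left:revise': 43})
```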
|
{ |
|
"text": "For the syntactically informed signature model (from here on: syntax model), we use the constituency and dependency parses provided in the Annotated Gigaword corpus (Napoles et al., 2012) . We limit ourselves to the Los Angeles Times/Washington Post portion of the corpus and extract phrases up to a length of 4. The following feature set is used to compute distributional signatures for the extracted phrases:", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 187, |
|
"text": "(Napoles et al., 2012)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Syntactic Model", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Position-aware lexical and part-of-speech unigram and bigram features, drawn from a threeword window to the right and left of the phrase.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Syntactic Model", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Features based on dependencies for both links into and out of the phrase, labeled with the corresponding lexical item and POS. If the phrase corresponds to a complete subtree in the constituency parse we additionally include lexical and POS features for its head word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Syntactic Model", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Syntactic features for any constituents governing the phrase, as well as for CCG-style slashed constituent labels for the phrase. The latter are split in governing constituent and missing constituent (with directionality). Figure 6 illustrates the syntax model's feature extraction for an example phrase occurrence. Using this method we extract distributional signatures for over 12 million 1-to-4-gram phrases.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 233, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Syntactic Model", |
|
"sec_num": "4.3.2" |
|
}, |
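A sketch of assembling the syntactic signature of Figure 6 from precomputed annotations; all inputs here are hypothetical stand-ins for parser output, and the feature naming mirrors the figure:

```python
def syntactic_signature(left_pos, right_pos, left_lex, right_lex, deps, ccg):
    """Position-aware lexical/POS features, labeled dependency links,
    and CCG-derived features for one phrase occurrence."""
    feats = []
    feats += ["pos-L-" + p for p in left_pos] + ["pos-R-" + p for p in right_pos]
    feats += ["lex-L-" + w for w in left_lex] + ["lex-R-" + w for w in right_lex]
    feats += ["dep-%s-%s-%s" % d for d in deps]   # (relation, side, head)
    gov, miss_side, miss = ccg                     # e.g. NP/NN: governs NP, misses NN
    feats += ["syn-gov-" + gov, "syn-miss-%s-%s" % (miss_side, miss)]
    return feats

print(syntactic_signature(["TO"], ["NN"], ["to"], ["investment"],
                          [("amod", "R", "investment"), ("det", "R", "NN")],
                          ("NP", "L", "NN")))
```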
|
{ |
|
"text": "Collecting distributional signatures for a large number of phrases quickly leads to unmanageably large datasets. Storing the syntax model's 12 million signatures in a compressed readable format, for instance, requires over 20GB of disk space. Like Ravichandran et al. (2005) and Bhagat and Ravichandran (2008) , we rely on locality sensitive hashing (LSH) to make the use of these large collections practical.", |
|
"cite_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 274, |
|
"text": "Ravichandran et al. (2005)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 309, |
|
"text": "Bhagat and Ravichandran (2008)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Locality Sensitive Hashing", |
|
"sec_num": "4.3.3" |
|
}, |
|
{ |
|
"text": "In order to avoid explicitly computing the feature vectors, which can be memory intensive for frequent phrases, we chose the online LSH variant described by Van Durme and Lall (2010), as implemented in the Jerboa toolkit (Van Durme, 2012). This method, based on the earlier work of Indyk and Motwani (1998) and Charikar (2002) , approximates the cosine similarity between two feature vectors based on the Hamming distance in a dimensionalityreduced bitwise representation. Two feature vectors u, v each of dimension d are first projected through a d \u00d7 b random matrix populated with draws from N (0, 1). We then convert the resulting bdimensional vectors into bit-vectors by setting each bit of the signature conditioned on whether the corresponding projected value is less than 0. Now, given the bit signatures h( u) and h( v), we can approximate the cosine similarity of u and v as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 306, |
|
"text": "Indyk and Motwani (1998)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 326, |
|
"text": "Charikar (2002)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Locality Sensitive Hashing", |
|
"sec_num": "4.3.3" |
|
}, |
|
{ |
|
"text": "sim (u, v) = cos D(h( u), h( v)) b \u03c0 , where d(\u2022, \u2022)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Locality Sensitive Hashing", |
|
"sec_num": "4.3.3" |
|
}, |
|
{ |
|
"text": "is the Hamming distance. In our experiments we use 256-bit signatures. This reduces the memory requirements for the syntax model to around 600MB.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Locality Sensitive Hashing", |
|
"sec_num": "4.3.3" |
|
}, |
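A minimal sketch of this LSH scheme (ours, not the Jerboa implementation): project a sparse vector through lazily generated Gaussian vectors, keep the sign bits, and recover an approximate cosine from the Hamming distance:

```python
import math
import random
import zlib

B = 256  # signature length in bits, as in the experiments

def _gauss_cell(feature, bit):
    """Deterministic N(0,1) draw for one cell of the d x b projection matrix,
    generated lazily so the full matrix never has to be materialized."""
    rng = random.Random(zlib.crc32(("%s|%d" % (feature, bit)).encode()))
    return rng.gauss(0.0, 1.0)

def bit_signature(vec, b=B):
    """Sign bits of the b random projections of a sparse feature vector."""
    return [sum(v * _gauss_cell(f, i) for f, v in vec.items()) < 0.0
            for i in range(b)]

def approx_cosine(sig1, sig2, b=B):
    """cos(D(h(u), h(v)) / b * pi), with D the Hamming distance."""
    hamming = sum(x != y for x, y in zip(sig1, sig2))
    return math.cos(hamming / b * math.pi)

u = {"left:revise": 43.0, "right:plans": 97.0}
v = {"left:revise": 40.0, "right:plans": 90.0, "right:goals": 5.0}
print(approx_cosine(bit_signature(u), bit_signature(v)))
```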
|
{ |
|
"text": "To rate the quality of our output, we solicit human judgments of the compressions along two five-point scales: grammaticality and meaning preservation. Judges are instructed to decide how much the meaning from a reference translation is retained in the compressed sentence, with a score of 5 indicating that all of the important information is present, and 1 being that the compression does not retain any of the original meaning. Similarly, a grammar score of 5 indicates perfect grammaticality, while a score of 1 is assigned to sentences that are entirely ungrammatical. We ran our evaluation on Mechanical Turk, where a total of 126 judges provided 3 redundant judgments for each system output. To provide additional quality control, our HITs were augmented with both positive and negative control compressions. For the positive control we used the reference compressions from our test set. Negative control was provided by adding a compression model based on random word deletions to the mix. In Table 1 we compare our distributional similarity-augmented systems to the plain pivotingbased baseline and the ILP approach. The compression ratios of the paraphrasing systems are tuned to match the average compression ratio seen on the development and test set. The ILP system is config-ured to loosely match this ratio, as to not overly constrain its search space. Our results indicate that the paraphrase approach significantly outperforms ILP on meaning retention. However, the baseline system shows notable weaknesses in grammaticality. Adding the n-gram distributional similarity model to the paraphraser recovers some of the difference in grammaticality while simultaneously yielding some gain in the compressions' meaning retention. Moving to distributional similarity estimated on the syntactic feature-set yields additional improvement, despite the model's lower coverage.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1001, |
|
"end": 1008, |
|
"text": "Table 1", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "It is known that human evaluation scores correlate linearly with the compression ratio produced by a sentence compression system . Thus, to ensure fairness in our comparisons, we produce a pairwise comparison breakdown that only takes into account compressions of almost identical length. 2 Figure 7 shows the results of this analysis, detailing the number of wins and ties in the human judgements.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 291, |
|
"end": 299, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We note that the gains in meaning retention over both the baseline and the ILP system are still present in the pairwise breakdown. The gains over the paraphrasing baseline, as well as the improvement in meaning over ILP are statistically significant at p < 0.05 (using the sign test).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We can observe that there is substantial overlap between the baseline paraphraser and the n-gram model, while the syntax model appears to yield noticeably different output far more often. Table 2 shows two example sentences drawn from our test set and the compressions produced by the different systems. It can be seen that both the paraphrase-based and ILP systems produce good quality results, with the paraphrase system retaining the meaning of the source sentence more accurately.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 195, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We presented a method to incorporate monolingual distributional similarity into linguistically informed paraphrases extracted from bilingual parallel data. Having extended the notion of similarity to discontiguous pattern with multi-word gaps, we investigated the effect of using feature-sets of varying 2 We require the compressions to be within \u00b110% length of one another. Figure 7 : A pairwise breakdown of the human judgments comparing the systems. Dark grey regions show the number of times the two systems were tied, and light grey shows how many times one system was judged to be better than the other. complexity to compute distributional similarity for our paraphrase collection. We conclude that, compared to a simple large-scale model, a rich, syntaxbased feature-set, even with significantly lower coverage, noticeably improves output quality in a textto-text generation task. Our syntactic method significantly improves grammaticality and meaning retention over a strong paraphrastic baseline, and offers substantial gains in meaning retention over a deletion-based state-of-the-art system. Reference we should ponder it and decide our path and follow it , thanks .", |
|
"cite_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 305, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 375, |
|
"end": 383, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Syntax now we think and decide on our way and choose one way . thanks .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "n-gram now we have and decide on our way and choose one way . thanks . PP now we have and decide on our way and choose one way . thanks .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "we have to think and make a decision and choose way thanks Table 2 : Example compressions produced by our systems and the baselines Table 1 for three input sentences from our test data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 66, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 139, |
|
"text": "Table 1", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "ILP", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "See Yao et al. (2012) for an analysis of this assumption.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Acknowledgements This research was supported in part by the NSF under grant IIS-0713448 and in part by the EuroMatrixPlus project funded by the European Commission (7th Framework Programme). Opinions, interpretations, and conclusions are the authors' alone.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The Theory of Parsing, Translation, and Compiling", |
|
"authors": [ |
|
{ |
|
"first": "Alfred", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Aho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Ullman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1972, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alfred V. Aho and Jeffrey D. Ullman. 1972. The Theory of Parsing, Translation, and Compiling. Prentice Hall.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The paraphrase search assistant: terminological feedback for iterative information seeking", |
|
"authors": [ |
|
{

"first": "Peter",

"middle": [

"G."

],

"last": "Anick",

"suffix": ""

},

{

"first": "Suresh",

"middle": [],

"last": "Tipirneni",

"suffix": ""

}
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of SI-GIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter G. Anick and Suresh Tipirneni. 1999. The para- phrase search assistant: terminological feedback for iterative information seeking. In Proceedings of SI- GIR.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Paraphrasing with bilingual parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Bannard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Bannard and Chris Callison-Burch. 2005. Para- phrasing with bilingual parallel corpora. In Proceed- ings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Information fusion in the context of multi-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Mckeown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Elhadad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Regina Barzilay, Kathleen R. McKeown, and Michael Elhadad. 1999. Information fusion in the context of multi-document summarization. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Information Fusion for Mutlidocument Summarization: Paraphrasing and Generation", |
|
"authors": [ |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Regina Barzilay. 2003. Information Fusion for Mutli- document Summarization: Paraphrasing and Genera- tion. Ph.D. thesis, Columbia University, New York.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Large scale acquisition of paraphrases for learning surface patterns", |
|
"authors": [ |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Bhagat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Ravichandran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL/HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rahul Bhagat and Deepak Ravichandran. 2008. Large scale acquisition of paraphrases for learning surface patterns. In Proceedings of ACL/HLT.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Web 1T 5-gram version 1", |
|
"authors": [ |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Brants", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Franz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thorsten Brants and Alex Franz. 2006. Web 1T 5-gram version 1.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Syntactic constraints on paraphrases extracted from parallel corpora", |
|
"authors": [ |
|
{

"first": "Chris",

"middle": [],

"last": "Callison-Burch",

"suffix": ""

}
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Callison-Burch. 2008. Syntactic constraints on paraphrases extracted from parallel corpora. In Pro- ceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Reranking bilingually extracted paraphrases using monolingual distributional similarity", |
|
"authors": [ |
|
{

"first": "Tsz Ping",

"middle": [],

"last": "Chan",

"suffix": ""

},

{

"first": "Chris",

"middle": [],

"last": "Callison-Burch",

"suffix": ""

},

{

"first": "Benjamin",

"middle": [],

"last": "Van Durme",

"suffix": ""

}
|
], |
|
"year": 2011, |
|
"venue": "EMNLP Workshop on GEMS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsz Ping Chan, Chris Callison-Burch, and Benjamin Van Durme. 2011. Reranking bilingually extracted para- phrases using monolingual distributional similarity. In EMNLP Workshop on GEMS.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Similarity estimation techniques from rounding algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Moses", |
|
"middle": [], |
|
"last": "Charikar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of STOC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moses Charikar. 2002. Similarity estimation techniques from rounding algorithms. In Proceedings of STOC.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A hierarchical phrase-based model for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Chiang. 2005. A hierarchical phrase-based model for statistical machine translation. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Word association norms, mutual information and lexicography", |
|
"authors": [ |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Church", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Hanks", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Computational Linguistics", |
|
"volume": "6", |
|
"issue": "1", |
|
"pages": "22--29", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenneth Church and Patrick Hanks. 1991. Word asso- ciation norms, mutual information and lexicography. Computational Linguistics, 6(1):22-29.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Global inference for sentence compression: An integer linear programming approach", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Clarke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "31", |
|
"issue": "", |
|
"pages": "273--381", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Clarke and Mirella Lapata. 2008. Global infer- ence for sentence compression: An integer linear pro- gramming approach. Journal of Artificial Intelligence Research, 31:273-381.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Sentence compression beyond word deletion", |
|
"authors": [ |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Trevor Cohn and Mirella Lapata. 2008. Sentence com- pression beyond word deletion. In Proceedings of the COLING.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Learning sentential paraphrases from bilingual parallel corpora for text-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juri Ganitkevitch, Chris Callison-Burch, Courtney Napoles, and Benjamin Van Durme. 2011. Learning sentential paraphrases from bilingual parallel corpora for text-to-text generation. In Proceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Joshua 4.0: Packing, PRO, and paraphrases", |
|
"authors": [ |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Weese", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Post", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of WMT12", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juri Ganitkevitch, Yuan Cao, Jonathan Weese, Matt Post, and Chris Callison-Burch. 2012. Joshua 4.0: Packing, PRO, and paraphrases. In Proceedings of WMT12.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Tuning as ranking", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Hopkins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Hopkins and Jonathan May. 2011. Tuning as rank- ing. In Proceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Approximate nearest neighbors: towards removing the curse of dimensionality", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Indyk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajeev", |
|
"middle": [], |
|
"last": "Motwani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of STOC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Indyk and Rajeev Motwani. 1998. Approximate nearest neighbors: towards removing the curse of di- mensionality. In Proceedings of STOC.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Europarl: A parallel corpus for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "MT summit", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2005. Europarl: A parallel corpus for sta- tistical machine translation. In MT summit, volume 5.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Statistical Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2010. Statistical Machine Translation. Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Web-based models for natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Keller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "ACM Transactions on Speech and Language Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mirella Lapata and Frank Keller. 2005. Web-based mod- els for natural language processing. ACM Transac- tions on Speech and Language Processing, 2(1).", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Alignment by agreement", |
|
"authors": [ |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Taskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of HLT/NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Percy Liang, Ben Taskar, and Dan Klein. 2006. Align- ment by agreement. In Proceedings of HLT/NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Discovery of inference rules from text", |
|
"authors": [ |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Pantel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Natural Language Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekang Lin and Patrick Pantel. 2001. Discovery of infer- ence rules from text. Natural Language Engineering.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "New tools for web-scale n-grams", |
|
"authors": [ |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Church", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Sekine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shane", |
|
"middle": [], |
|
"last": "Bergsma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kailash", |
|
"middle": [], |
|
"last": "Patil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Lathbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vikram", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kapil", |
|
"middle": [], |
|
"last": "Dalwani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sushant", |
|
"middle": [], |
|
"last": "Narsale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekang Lin, Kenneth Church, Heng Ji, Satoshi Sekine, David Yarowsky, Shane Bergsma, Kailash Patil, Emily Pitler, Rachel Lathbury, Vikram Rao, Kapil Dalwani, and Sushant Narsale. 2010. New tools for web-scale n-grams. In Proceedings of LREC.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Paraphrasing using given and new information in a question-answer system", |
|
"authors": [ |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"R." |
|
], |
|
"last": "McKeown", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1979, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kathleen R. McKeown. 1979. Paraphrasing using given and new information in a question-answer system. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Paraphrastic sentence compression with a character-based metric: Tightening without deletion", |
|
"authors": [ |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Workshop on Monolingual Text-To-Text Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Courtney Napoles, Chris Callison-Burch, Juri Ganitke- vitch, and Benjamin Van Durme. 2011. Paraphrastic sentence compression with a character-based metric: Tightening without deletion. Workshop on Monolin- gual Text-To-Text Generation.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Annotated gigaword", |
|
"authors": [ |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Napoles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gormley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of AKBC-WEKEX", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Courtney Napoles, Matt Gormley, and Benjamin Van Durme. 2012. Annotated gigaword. In Proceedings of AKBC-WEKEX 2012.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Improved inference for unlexicalized parsing", |
|
"authors": [ |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of HLT/NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slav Petrov and Dan Klein. 2007. Improved infer- ence for unlexicalized parsing. In Proceedings of HLT/NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Learning sufrace text patterns for a question answering system", |
|
"authors": [ |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Ravichandran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepak Ravichandran and Eduard Hovy. 2002. Learning sufrace text patterns for a question answering system. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Randomized Algorithms and NLP: Using Locality Sensitive Hash Functions for High Speed Noun Clustering", |
|
"authors": [ |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Ravichandran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Pantel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepak Ravichandran, Patrick Pantel, and Eduard Hovy. 2005. Randomized Algorithms and NLP: Using Lo- cality Sensitive Hash Functions for High Speed Noun Clustering. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Statistical machine translation for query expansion in answer retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Riezler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Vasserman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Tsochantaridis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vibhu", |
|
"middle": [], |
|
"last": "Mittal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Riezler, Alexander Vasserman, Ioannis Tsochan- taridis, Vibhu Mittal, and Yi Liu. 2007. Statistical machine translation for query expansion in answer re- trieval. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "SRILM -an extensible language modeling toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceeding of the International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas Stolcke. 2002. SRILM -an extensible language modeling toolkit. In Proceeding of the International Conference on Spoken Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Online generation of locality sensitive hash signatures", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashwin", |
|
"middle": [], |
|
"last": "Lall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Van Durme and Ashwin Lall. 2010. Online generation of locality sensitive hash signatures. In Proceedings of ACL, Short Papers.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Jerboa: A toolkit for randomized and streaming algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Human Language Technology Center of Excellence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Van Durme. 2012. Jerboa: A toolkit for randomized and streaming algorithms. Technical Re- port 7, Human Language Technology Center of Excel- lence, Johns Hopkins University.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Antal van den Bosch, and Emiel Krahmer. 2012. Sentence simplification by monolingual machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Sander", |
|
"middle": [], |
|
"last": "Wubben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antal", |
|
"middle": [], |
|
"last": "Van Den Bosch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sander Wubben, Antal van den Bosch, and Emiel Krah- mer. 2012. Sentence simplification by monolingual machine translation. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Expectations of word sense in parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Xuchen", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of HLT/NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuchen Yao, Benjamin Van Durme, and Chris Callison- Burch. 2012. Expectations of word sense in parallel corpora. In Proceedings of HLT/NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Pivot approach for extracting paraphrase patterns from bilingual corpora", |
|
"authors": [ |
|
{ |
|
"first": "Shiqi", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL/HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shiqi Zhao, Haifeng Wang, Ting Liu, and Sheng Li. 2008. Pivot approach for extracting paraphrase pat- terns from bilingual corpora. In Proceedings of ACL/HLT.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Application-driven statistical paraphrase generation", |
|
"authors": [ |
|
{ |
|
"first": "Shiqi", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shiqi Zhao, Xiang Lan, Ting Liu, and Sheng Li. 2009. Application-driven statistical paraphrase generation. In Proceedings of ACL.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Figure 1: Pivot-based paraphrase extraction for contiguous phrases. Two phrases translating to the same phrase in the foreign language are assumed to be paraphrases of one another.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"text": "An example of a synchronous paraphrastic derivation, here a sentence compression. Shaded words are deleted in the indicated rule applications.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"text": "illustrates syntax-constrained pivoting and feature aggregation over multiple foreign language translations for a paraphrase pattern.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"text": "Scoring a rule by extracting and scoring contiguous phrases consistent with the alignment. The overall score of the rule is determined by averaging across all pairs of contiguous subphrases.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF4": { |
|
"text": "Results of the human evaluation on longer compressions: pairwise compression rates (CR), meaning and grammaticality scores. Bold indicates a statistically significance difference at p < 0.05.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"text": "Source should these political developments have an impact on sports ? Reference should these political events affect sports ? Syntax should these events have an impact on sports ? n-gram these political developments impact on sports ? PP should these events impact on sports ? ILP political developments have an impact Source now we have to think and make a decision about our direction and choose only one way . thanks .", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |