|
{ |
|
"paper_id": "D08-1043", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:30:08.893477Z" |
|
}, |
|
"title": "Bridging Lexical Gaps between Queries and Questions on Large Online Q&A Collections with Compact Translation Models", |
|
"authors": [ |
|
{ |
|
"first": "Jung-Tae", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Korea University", |
|
"location": { |
|
"settlement": "Seoul", |
|
"country": "Korea" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sang-Bum", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Search Business Team", |
|
"institution": "SK Telecom", |
|
"location": { |
|
"settlement": "Seoul", |
|
"country": "Korea" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Young-In", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Korea University", |
|
"location": { |
|
"settlement": "Seoul", |
|
"country": "Korea" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hae-Chang", |
|
"middle": [], |
|
"last": "Rim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Korea University", |
|
"location": { |
|
"settlement": "Seoul", |
|
"country": "Korea" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Lexical gaps between queries and questions (documents) have been a major issue in question retrieval on large online question and answer (Q&A) collections. Previous studies address the issue by implicitly expanding queries with the help of translation models pre-constructed using statistical techniques. However, since it is possible for unimportant words (e.g., non-topical words, common words) to be included in the translation models, a lack of noise control on the models can cause degradation of retrieval performance. This paper investigates a number of empirical methods for eliminating unimportant words in order to construct compact translation models for retrieval purposes. Experiments conducted on a real world Q&A collection show that substantial improvements in retrieval performance can be achieved by using compact translation models.", |
|
"pdf_parse": { |
|
"paper_id": "D08-1043", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Lexical gaps between queries and questions (documents) have been a major issue in question retrieval on large online question and answer (Q&A) collections. Previous studies address the issue by implicitly expanding queries with the help of translation models pre-constructed using statistical techniques. However, since it is possible for unimportant words (e.g., non-topical words, common words) to be included in the translation models, a lack of noise control on the models can cause degradation of retrieval performance. This paper investigates a number of empirical methods for eliminating unimportant words in order to construct compact translation models for retrieval purposes. Experiments conducted on a real world Q&A collection show that substantial improvements in retrieval performance can be achieved by using compact translation models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Community-driven question answering services, such as Yahoo! Answers 1 and Live Search QnA 2 , have been rapidly gaining popularity among Web users interested in sharing information online. By inducing users to collaboratively submit questions and answer questions posed by other users, large amounts of information have been collected in the form of question and answer (Q&A) pairs in recent years. This user-generated information is a valuable resource for many information seekers, because users can acquire information straightforwardly by searching through answered questions that satisfy their information need.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Retrieval models for such Q&A collections should manage to handle the lexical gaps or word mismatches between user questions (queries) and answered questions in the collection. Consider the two following examples of questions that are semantically similar to each other:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 \"Where can I get cheap airplane tickets?\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 \"Any travel website for low airfares?\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Conventional word-based retrieval models would fail to capture the similarity between the two, because they have no words in common. To bridge the query-question gap, prior work on Q&A retrieval by Jeon et al. (2005) implicitly expands queries with the use of pre-constructed translation models, which lets you generate query words not in a question by translation to alternate words that are related. In practice, these translation models are often constructed using statistical machine translation techniques that primarily rely on word co-occurrence statistics obtained from parallel strings (e.g., question-answer pairs).", |
|
"cite_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 216, |
|
"text": "Jeon et al. (2005)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A critical issue of the translation-based approaches is the quality of translation models constructed in advance. If no noise control is conducted during the construction, it is possible for translation models to contain \"unnecessary\" translations (i.e., translating a word into an unimportant word, such as a non-topical or common word). In the query expansion viewpoint, an attempt to identify and decrease the proportion of unnecessary translations in a translation model may produce an effect of \"selective\" implicit query expansion and result in improved retrieval. However, prior work on translation-based Q&A retrieval does not recognize this issue and uses the translation model as it is; essentially no attention seems to have been paid to improving the performance of the translation-based approach by enhancing the quality of translation models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we explore a number of empirical methods for selecting and eliminating unimportant words from parallel strings to avoid unnecessary translations from being learned in translation models built for retrieval purposes. We use the term compact translation models to refer to the resulting models, since the total number of parameters for modeling translations would be minimized naturally. We also present experiments in which compact translation models are used in Q&A retrieval. The main goal of our study is to investigate if and how compact translation models can improve the performance of Q&A retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of this paper is organized as follows. The next section introduces a translation-based retrieval model and accompanying techniques used to retrieve query-relevant questions. Section 3 presents a number of empirical ways to select and eliminate unimportant words from parallel strings for training compact translation models. Section 4 summarizes the compact translation models we built for retrieval experiments. Section 5 presents and discusses the results of retrieval experiments. Section 6 presents related works. Finally, the last section concludes the paper and discusses future directions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This section introduces the translation-based language modeling approach to retrieval that has been used to bridge the lexical gap between queries and already-answered questions in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In the basic language modeling framework for retrieval (Ponte and Croft, 1998) , the similarity between a query Q and a document D for ranking may be modeled as the probability of the document language model M D built from D generating Q:", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 78, |
|
"text": "(Ponte and Croft, 1998)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "sim(Q, D) \u2248 P (Q|M D )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Assuming that query words occur independently given a particular document language model, the query-likelihood P (Q|M D ) is calculated as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (Q|M D ) = q\u2208Q P (q|M D )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where q represents a query word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To avoid zero probabilities in document language models, a mixture between a document-specific multinomial distribution and a multinomial distribution estimated from the entire document collection is widely used in practice:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (Q|M D ) = q\u2208Q (1 \u2212 \u03bb) \u2022 P (q|M D ) +\u03bb \u2022 P (q|M C )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where 0 < \u03bb < 1 and M C represents a language model built from the entire collection. The probabilities P (w|M D ) and P (w|M C ) are calculated using maximum likelihood estimation. The basic language modeling framework does not address the issue of lexical gaps between queries and question. Berger and Lafferty (1999) viewed information retrieval as statistical document-query translation and introduced translation models to map query words to document words. Assuming that a translation model can be represented by a conditional probability distribution of translation T (\u2022|\u2022) between words, we can model P (q|M D ) in Equation 3 as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 319, |
|
"text": "Berger and Lafferty (1999)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (q|M D ) = w\u2208D T (q|w)P (w|M D )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where w represents a document word. 3 The translation probability T (q|w) virtually represents the degree of relationship between query word q and document word w captured in a different, machine translation setting. Then, in the traditional information retrieval viewpoint, the use of translation models produce an implicit query expansion effect, since query words not in a document are mapped to related words in the document. This implies that translation-based retrieval models would make positive contributions to retrieval performance only when the pre-constructed translation models have reliable translation probability distributions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 37, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation-based Retrieval Model", |
|
"sec_num": "2" |
|
}, |
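
{

"text": "To make the computation concrete, the following is a minimal Python sketch (ours, not part of the original formulation) of Equations (3) and (4) combined: the smoothed query likelihood in which P(q|M_D) is replaced by its translation-based estimate. The nested dictionary T (with T[q][w] holding the translation probability), the pre-tokenized word lists, and the value of \u03bb are illustrative assumptions.\n\nfrom collections import Counter\n\ndef translation_based_score(query, doc, collection, T, lam=0.5):\n    # Product over query words of the smoothed likelihood (Equation 3),\n    # with P(q|M_D) replaced by sum_w T(q|w) * P(w|M_D) (Equation 4).\n    doc_tf, col_tf = Counter(doc), Counter(collection)\n    score = 1.0\n    for q in query:\n        p_trans = sum(T.get(q, {}).get(w, 0.0) * tf / len(doc) for w, tf in doc_tf.items())\n        p_col = col_tf[q] / len(collection)\n        score *= (1.0 - lam) * p_trans + lam * p_col\n    return score",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Translation-based Retrieval Model",

"sec_num": "2"

},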
|
{ |
|
"text": "Obviously, we need to build a translation model in advance. Usually the IBM Model 1, developed in the statistical machine translation field (Brown et al., 1993) , is used to construct translation models for retrieval purposes in practice. Specifically, given a number of parallel strings, the IBM Model 1 learns the translation probability from a source word s to a target word t as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 160, |
|
"text": "(Brown et al., 1993)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "IBM Translation Model 1", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "T (t|s) = \u03bb \u22121 s N i c(t|s; J i )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "IBM Translation Model 1", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where \u03bb s is a normalization factor to make the sum of translation probabilities for the word s equal to 1, N is the number of parallel string pairs, and J i is the ith parallel string pair. c(t|s; J i ) is calculated as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "IBM Translation Model 1", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "c(t|s; J i ) = P (t|s) P (t|s 1 ) + \u2022 \u2022 \u2022 + P (t|s n ) \u00d7f req t,J i \u00d7 f req s,J i (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "IBM Translation Model 1", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where {s 1 , . . . , s n } are words in the source text in J i . f req t,J i and f req s,J i are the number of times that t and s occur in J i , respectively. Given the initial values of T (t|s), Equations (5) and (6) are used to update T (t|s) repeatedly until the probabilities converge, in an EM-based manner.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "IBM Translation Model 1", |
|
"sec_num": "2.1" |
|
}, |
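
{

"text": "For illustration, the following is a minimal EM sketch of the update defined by Equations (5) and (6), assuming pre-tokenized parallel string pairs; iterating over tokens (with repeats) accounts for the frequency terms. In our experiments we use GIZA++ rather than a toy implementation like this.\n\nfrom collections import defaultdict\n\ndef train_ibm_model1(pairs, iterations=10):\n    # pairs: list of (source_words, target_words) parallel strings J_i.\n    # Returns T where T[t][s] approximates the translation probability T(t|s).\n    tgt_vocab = {t for _, tgt in pairs for t in tgt}\n    T = defaultdict(lambda: defaultdict(lambda: 1.0 / len(tgt_vocab)))\n    for _ in range(iterations):\n        counts = defaultdict(float)   # expected counts c(t|s; J_i) summed over pairs\n        totals = defaultdict(float)   # normalization factor lambda_s per source word\n        for src, tgt in pairs:\n            for t in tgt:\n                denom = sum(T[t][s] for s in src)\n                for s in src:\n                    c = T[t][s] / denom\n                    counts[(s, t)] += c\n                    totals[s] += c\n        for (s, t), c in counts.items():   # M-step: renormalize per source word\n            T[t][s] = c / totals[s]\n    return T",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "IBM Translation Model 1",

"sec_num": "2.1"

},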
|
{ |
|
"text": "Note that the IBM Model 1 solely relies on word co-occurrence statistics obtained from parallel strings in order to learn translation probabilities. This implies that if parallel strings have unimportant words, a resulted translation model based on IBM Model 1 may contain unimportant words with nonzero translation probabilities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "IBM Translation Model 1", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We alleviate this drawback by eliminating unimportant words from parallel strings, avoiding them from being included in the conditional translation probability distribution. This naturally induces the construction of compact translation models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "IBM Translation Model 1", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The construction of statistical translation models previously discussed requires a corpus consisting of parallel strings. Since monolingual parallel texts are generally not available in real world, one must artificially generate a \"synthetic\" parallel corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gathering Parallel Strings from Q&A Collections", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Question and answer as parallel pairs: The simplest approach is to directly employ questions and their answers in the collections by setting either as source strings and the other as target strings, with the assumption that a question and its corresponding answer are naturally parallel to each other. Formally, if we have a Q&A collection as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gathering Parallel Strings from Q&A Collections", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "C = {D 1 , D 2 , . . . , D n },", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gathering Parallel Strings from Q&A Collections", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where D i refers to an ith Q&A data consisting of a question q i and its answer a i , we can construct a parallel corpus C as {(q 1 , a 1 ), . . . , (q n , a n )}\u222a{(a 1 , q 1 ), . . . , (a n , q n )} = C where each element (s, t) refers to a parallel pair consisting of source string s and target string t. The number of parallel string samples would eventually be twice the size of the collections.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gathering Parallel Strings from Q&A Collections", |
|
"sec_num": "2.2" |
|
}, |
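
{

"text": "The construction is straightforward; below is a minimal sketch, assuming the collection is given as a list of (question, answer) string pairs:\n\ndef build_q_a_corpus(collection):\n    # collection: list of (q_i, a_i) pairs; returns parallel pairs in both\n    # directions, so the corpus is twice the size of the collection.\n    corpus = []\n    for q, a in collection:\n        corpus.append((q, a))\n        corpus.append((a, q))\n    return corpus",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Gathering Parallel Strings from Q&A Collections",

"sec_num": "2.2"

},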
|
{ |
|
"text": "Similar questions as parallel pairs: Jeon et al. 2005proposed an alternative way of automatically collecting a relatively larger set of parallel strings from Q&A collections. Motivated by the observation that many semantically identical questions can be found in typical Q&A collections, they used similarities between answers calculated by conventional word-based retrieval models to automatically group questions in a Q&A collection as pairs. Formally, two question strings q i and q j would be included in a parallel corpus C as {(q i , q j ), (q j , q i )} \u2282 C only if their answer strings a i and a j have a similarity higher than a pre-defined threshold value. The similarity is calculated as the reverse of the harmonic mean of ranks as sim(a i , a j ) = 1 2 ( 1 r j + 1 r i ), where r j and r i refer to the rank of the a j and a i when a i and a j are given as queries, respectively. This approach may artificially produce much more parallel string pairs for training the IBM Model 1 than the former approach, depending on the threshold value. 4 To our knowledge, there has not been any study comparing the effectiveness of the two approaches yet. In this paper, we try both approaches and compare the effectiveness in retrieval performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1053, |
|
"end": 1054, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gathering Parallel Strings from Q&A Collections", |
|
"sec_num": "2.2" |
|
}, |
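
{

"text": "A sketch of this pairing procedure is given below. The helper rank_of(i, j), which returns the rank of answer a_j when answer a_i is issued as a query to a word-based retrieval model, is a hypothetical placeholder; the 0.05 threshold follows the setting reported in the footnote.\n\ndef build_q_q_corpus(questions, rank_of, threshold=0.05):\n    corpus = []\n    n = len(questions)\n    for i in range(n):\n        for j in range(i + 1, n):\n            # reciprocal of the harmonic mean of the two ranks\n            sim = 0.5 * (1.0 / rank_of(i, j) + 1.0 / rank_of(j, i))\n            if sim > threshold:\n                corpus.append((questions[i], questions[j]))\n                corpus.append((questions[j], questions[i]))\n    return corpus",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Gathering Parallel Strings from Q&A Collections",

"sec_num": "2.2"

},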
|
{ |
|
"text": "We adopt a term weight ranking approach to identify and eliminate unimportant words from parallel strings, assuming that a word in a string is unim- portant if it holds a relatively low significance in the document (Q&A pair) of which the string is originally taken from. Some issues may arise:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eliminating Unimportant Words", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 How to assign a weight to each word in a document for term ranking?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eliminating Unimportant Words", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 How much to remove as unimportant words from the ranked list?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eliminating Unimportant Words", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The following subsections discuss strategies we use to handle each of the issues above.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Eliminating Unimportant Words", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In this section, the two different term weighting strategies are introduced. tf-idf: The use of tf-idf weighting on evaluating how unimportant a word is to a document seems to be a good idea to begin with. We have used the following formulas to calculate the weight of word w in document D:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Assigning Term Weights", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "tf -idf w,D = tf w,D \u00d7 idf w (7) tf w,D = f req w,D |D| , idf w = log |C| df w", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Assigning Term Weights", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where f req w,D refers to the number of times w occurs in D, |D| refers to the size of D (in words), |C| refers to the size of the document collection, and df w refers to the number of documents where w appears. Eventually, words with low tf-idf weights may be considered as unimportant.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Assigning Term Weights", |
|
"sec_num": "3.1" |
|
}, |
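
{

"text": "A minimal sketch of Equation (7), assuming each document is a pre-tokenized list of words and that D itself belongs to the collection C (so df_w is never zero for words of D):\n\nimport math\nfrom collections import Counter\n\ndef tf_idf_weights(doc, collection):\n    # Document frequency df_w over the collection C.\n    df = Counter()\n    for d in collection:\n        df.update(set(d))\n    tf = Counter(doc)\n    return {w: (tf[w] / len(doc)) * math.log(len(collection) / df[w]) for w in tf}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Assigning Term Weights",

"sec_num": "3.1"

},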
|
{ |
|
"text": "TextRank: The task of term weighting, in fact, has been often applied to the keyword extraction task in natural language processing studies. As an alternative term weighting approach, we have used a variant of Mihalcea and Tarau (2004) 's Tex-tRank, a graph-based ranking model for keyword extraction which achieves state-of-the-art accuracy without the need of deep linguistic knowledge or domain-specific corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 235, |
|
"text": "Mihalcea and Tarau (2004)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Assigning Term Weights", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Specifically, the ranking algorithm proceeds as follows. First, words in a given document are added as vertices in a graph G. Then, edges are added between words (vertices) if the words co-occur in a fixed-sized window. The number of co-occurrences becomes the weight of an edge. When the graph is constructed, the score of each vertex is initialized as 1, and the PageRank-based ranking algorithm is run on the graph iteratively until convergence. The TextRank score of a word w in document D at kth iteration is defined as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Assigning Term Weights", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "R k w,D = (1 \u2212 d) + d \u2022 \u2200j:(i,j)\u2208G e i,j \u2200l:(j,l)\u2208G e j,l R k\u22121 w,D", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Assigning Term Weights", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where d is a damping factor usually set to 0.85, and e i,j is an edge weight between i and j.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Assigning Term Weights", |
|
"sec_num": "3.1" |
|
}, |
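
{

"text": "A minimal sketch of this variant follows, assuming a pre-tokenized word list and a fixed number of iterations in place of an explicit convergence test; the window size and damping factor follow the text.\n\nfrom collections import defaultdict\n\ndef textrank(words, window=3, d=0.85, iterations=30):\n    # Co-occurrence graph: edge weight e_{i,j} = number of times two words\n    # co-occur within the fixed-size window.\n    edges = defaultdict(float)\n    for i in range(len(words)):\n        for j in range(i + 1, min(i + window, len(words))):\n            if words[i] != words[j]:\n                edges[(words[i], words[j])] += 1.0\n                edges[(words[j], words[i])] += 1.0\n    out_sum = defaultdict(float)\n    for (u, v), e in edges.items():\n        out_sum[u] += e\n    scores = {w: 1.0 for w in words}   # every vertex starts at 1\n    for _ in range(iterations):\n        new_scores = {}\n        for w in scores:\n            rank = sum(e / out_sum[u] * scores[u] for (u, v), e in edges.items() if v == w)\n            new_scores[w] = (1.0 - d) + d * rank\n        scores = new_scores\n    return scores",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Assigning Term Weights",

"sec_num": "3.1"

},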
|
{ |
|
"text": "The assumption behind the use of the variant of TextRank is that a word is likely to be an important word in a document if it co-occurs frequently with other important words in the document. Eventually, words with low TextRank scores may be considered as unimportant. The main differences of TextRank compared to tf-idf is that it utilizes the context information of words to assign term weights. Figure 1 demonstrates that term weighting results of TextRank and tf-idf are greatly different. Notice that TextRank assigns low scores to words that co- occur only with stopwords. This implies that Tex-tRank weighs terms more \"strictly\" than the tf-idf approach, with use of contexts of words.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 397, |
|
"end": 405, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Assigning Term Weights", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Once a final score (either tf-idf or TextRank score) is obtained for each word, we create a list of words ranked in decreasing order of their scores and eliminate the ones at lower ranks as unimportant words. The question here is how to decide the proportion or quantity to be removed from the ranked list.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deciding the Quantity to be Removed from Ranked List", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Removing a fixed proportion: The first approach we have used is to decide the number of unimportant words based on the size of the original string. For our experiments, we manually vary the proportion to be removed as 25%, 50%, and 75%. For instance, if the proportion is set to 50% and an original string consists of ten words, at most five words would be remained as important words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deciding the Quantity to be Removed from Ranked List", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Using average score as threshold: We also have used an alternate approach to deciding the quantity. Instead of eliminating a fixed proportion, words are removed if their score is lower than the average score of all words in a document. This approach decides the proportion to be removed more flexibly than the former approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deciding the Quantity to be Removed from Ranked List", |
|
"sec_num": "3.2" |
|
}, |
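
{

"text": "Both strategies can be sketched as follows, assuming scores is a word-to-weight mapping produced by either tf-idf or TextRank:\n\ndef eliminate_unimportant(words, scores, proportion=None):\n    # proportion given: drop that fraction of the lowest-ranked distinct words;\n    # proportion None: drop every word scoring below the document average.\n    vocab = sorted(set(words), key=lambda w: scores[w], reverse=True)\n    if proportion is not None:\n        keep = set(vocab[:len(vocab) - int(len(vocab) * proportion)])\n    else:\n        avg = sum(scores[w] for w in vocab) / len(vocab)\n        keep = {w for w in vocab if scores[w] >= avg}\n    return [w for w in words if w in keep]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Deciding the Quantity to be Removed from Ranked List",

"sec_num": "3.2"

},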
|
{ |
|
"text": "We have initially built two parallel corpora from a Q&A collection 5 , denoted as (Q A) corpus and (Q Q) corpus henceforth, by varying the methods in which parallel strings are gathered (described in Section 2.2). The (Q A) corpus consists of 85,938 parallel string pairs, and the (Q Q) corpus contains 575,649 parallel string pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Compact Translation Models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In order to build compact translation models, we have preprocessed the parallel corpus using different word elimination strategies so that unimportant words would be removed from parallel strings. We have also used a stoplist 6 consisting of 429 words to remove stopwords. The out-of-the-box GIZA++ 7 (Och and Ney, 2004) has been used to learn translation models using the pre-processed parallel corpus for our retrieval experiments. We have also trained initial translation models, using a parallel corpus from which only the stopwords are removed, to compare with the compact translation models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Compact Translation Models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Eventually, the number of parameters needed for modeling translations would be minimized if unimportant words are eliminated with different ap-proaches. Table 1 and 2 shows the impact of various word elimination strategies on the construction of compact translation models using the (Q A) corpus and the (Q Q) corpus, respectively. The two tables report the size of the vocabulary contained and the average number of translations per word in the resulting compact translation models, along with percentage decreases with respect to the initial translation models in which only stopwords are removed. We make these observations:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 160, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Building Compact Translation Models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 The translation models learned from the (Q Q) corpus have less vocabularies but more average translations per word than the ones learned from the (Q A) corpus. This result implies that a large amount of noise may have been created inevitably when a large number of parallel strings (pairs of similar questions) were artificially gathered from the Q&A collection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Compact Translation Models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 The TextRank strategy tends to eliminate larger sets of words as unimportant words than the tf-idf strategy when a fixed proportion is removed, regardless of the corpus type. Recall that the TextRank approach assigns weights to words more strictly by using contexts of words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Compact Translation Models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 The approach to remove words according to the average weight of a document (denoted as Avg.Score) tends to eliminate relatively larger portions of words as unimportant words than any of the fixed-proportion strategies, regardless of either the corpus type or the ranking strategy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Building Compact Translation Models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Experiments have been conducted on a real world Q&A collection to demonstrate the effectiveness of compact translation models on Q&A retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrieval Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this section, four experimental settings for the Q&A retrieval experiments are described in detail. Data: For the experiments, Q&A data have been collected from the Science domain of Yahoo! Answers, one of the most popular community-based question answering service on the Web. We have obtained a total of 43,001 questions with a best answer (selected either by the questioner or by votes of other users) by recursively traversing subcategories of the Science domain, with up to 1,000 question pages retrieved. 8 Among the obtained Q&A pairs, 32 Q&A pairs have been randomly selected as the test set, and the remaining 42,969 questions have been the reference set to be retrieved. Each Q&A pair has three text fields: question title, question content, and answer. 9 The fields of each Q&A pair in the test set are considered as various test queries; the question title, the question content, and the answer are regarded as a short query, a long query, and a supplementary query, respectively. We have used long queries and supplementary queries only in the relevance judgment procedure. All retrieval experiments have been conducted using short queries only.", |
|
"cite_spans": [ |
|
{ |
|
"start": 514, |
|
"end": 515, |
|
"text": "8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 767, |
|
"end": 768, |
|
"text": "9", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Relevance judgments: To find relevant Q&A pairs given a short query, we have employed a pooling technique used in the TREC conference series. We have pooled the top 40 Q&A pairs from each retrieval results generated by varying the retrieval algorithms, the search field, and the query type. Popular word-based models, including the Okapi BM25, query-likelihood language model, and previous translation-based models (Jeon et al., 2005) , have been used. 10 Relevance judgments have been done by two student volunteers (both fluent in English). Since many community-based question answering services present their search results in a hierarchical fashion (i.e. a list of relevant questions is shown first, and then the user chooses a specific question from the list to see its answers), a Q&A pair has been judged as relevant if its question is semantically similar to the query; neither quality nor rightness of the answer has not been considered. When a disagreement has been made between two volunteers, one of the authors has made the final judgment. As a result, 177 relevant Q&A pairs have been found in total for the 32 short queries.", |
|
"cite_spans": [ |
|
{ |
|
"start": 415, |
|
"end": 434, |
|
"text": "(Jeon et al., 2005)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 455, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Baseline retrieval models: The proposed ap-proach to Q&A retrieval using compact translation models (denoted as CTLM henceforth) is compared to three baselines: QLM: Query-likelihood language model for retrieval (equivalent to Equation 3, without use of translation models). This model represents wordbased retrieval models widely used in practice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "TLM(Q Q): Translation-based language model for question retrieval (Jeon et al., 2005) . This model uses IBM Model 1 learned from the (Q Q) corpus of which stopwords are removed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 85, |
|
"text": "(Jeon et al., 2005)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "TLM(Q A): A variant of the translation-based approach. This model uses IBM model 1 learned from the (Q A) corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Evaluation metrics:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We have reported the retrieval performance in terms of Mean Average Precision (MAP) and Mean R-Precision (R-Prec).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Average Precision can be computed based on the precision at each relevant document in the ranking. Mean Average Precision is defined as the mean of the Average Precision values across the set of all queries:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "M AP (Q) = 1 |Q| q\u2208Q 1 m q m q k=1 P recision(R k ) (9)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "where Q is the set of test queries, m q is the number of relevant documents for a query q, R k is the set of ranked retrieval results from the top until rank position k, and P recision(R k ) is the fraction of relevant documents in R k (Manning et al., 2008) . R-Precision is defined as the precision after R documents have been retrieved where R is the number of relevant documents for the current query (Buckley and Voorhees, 2000) . Mean R-Precision is the mean of the R-Precisions across the set of all queries.", |
|
"cite_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 258, |
|
"text": "(Manning et al., 2008)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 433, |
|
"text": "(Buckley and Voorhees, 2000)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We take MAP as our primary evaluation metric.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "5.1" |
|
}, |
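
{

"text": "For reference, a minimal sketch of both measures, assuming each query comes with a ranked list of document ids and a set of relevant ids:\n\ndef average_precision(ranked, relevant):\n    # Mean of Precision(R_k) taken at each relevant document (inner sum of Equation 9).\n    hits, total = 0, 0.0\n    for k, doc_id in enumerate(ranked, start=1):\n        if doc_id in relevant:\n            hits += 1\n            total += hits / k\n    return total / len(relevant)\n\ndef r_precision(ranked, relevant):\n    # Precision after R documents are retrieved, R = number of relevant documents.\n    r = len(relevant)\n    return sum(1 for doc_id in ranked[:r] if doc_id in relevant) / r\n\ndef mean_over_queries(metric, runs):\n    # runs: list of (ranked, relevant) per query; yields MAP or Mean R-Precision.\n    return sum(metric(ranked, relevant) for ranked, relevant in runs) / len(runs)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Settings",

"sec_num": "5.1"

},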
|
{ |
|
"text": "Preliminary retrieval experiments have been conducted using the baseline QLM and different fields of Q&A data as retrieval unit. Table 3 shows the effectiveness of each field.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 136, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The results imply that the question title field is the most important field in our Yahoo! Answers collection; this also supports the observation presented by Jeon et al. (2005) . Based on the preliminary observations, all retrieval models tested in this paper have ranked Q&A pairs according to the similarity scores between queries and question titles. Table 4 presents the comparison results of three baseline retrieval models and the proposed CTLMs. For each method, the best performance after empirical \u03bb parameter tuning according to MAP is presented.", |
|
"cite_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 176, |
|
"text": "Jeon et al. (2005)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 354, |
|
"end": 361, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Notice that both the TLMs and CTLMs have outperformed the word-based QLM. This implies that word-based models that do not address the issue of lexical gaps between queries and questions often fail to retrieve relevant Q&A data that have little word overlap with queries, as noted by Jeon et al. (2005) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 283, |
|
"end": 301, |
|
"text": "Jeon et al. (2005)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Moreover, notice that the proposed CTLMs have achieved significantly better performances in all evaluation metrics than both QLM and TLMs, regardless of the parallel corpus in which the incorporated translation models are trained from. This is a clear indication that the use of compact translation models built with appropriate word elimination strategies is effective in closing the query-question lexical gaps for improving the performance of question retrieval in the context of language modeling framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Note that the retrieval performance varies by the type of training corpus; CTLM(Q A) has outperformed CTLM(Q Q) significantly. This proves the statement we made earlier that the (Q Q) corpus would contain much noise since the translation models learned from the (Q Q) corpus tend to have smaller vocabulary sizes but significantly more average translations per word than the ones learned from the (Q A) corpus. Table 5 and 6 show the effect of various word elimination strategies on the retrieval performance of CTLMs in which the incorporated compact translation models are trained from the (Q Q) corpus and the (Q A) corpus, respectively. It is interesting to note that the importance of modifications in word elimination strategies also varies by the type of training corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 411, |
|
"end": 418, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The retrieval results indicate that when the translation model is trained from the \"less noisy\" (Q A) corpus, eliminating a relatively large proportions of words may hurt the retrieval performance of CTLM. In the case when the translation model is trained from the \"noisy\" (Q Q) corpus, a better retrieval performance may be achieved if words are eliminated appropriately to a certain extent.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In terms of weighting scheme, the TextRank approach, which is more \"strict\" than tf-idf in eliminating unimportant words, has led comparatively higher retrieval performances on all levels of removal quantity when the translation model has been trained from the \"noisy\" (Q Q) corpus. On the contrary, the \"less strict\" tf-idf approach has led better performances when the translation model has been trained from the \"less noisy\" (Q A) corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In summary, the results imply that the performance of translation-based retrieval models can be significantly improved when strategies for building of compact translation models are chosen properly, regarding the expected noise level of the parallel corpus for training the translation models. In a case where a noisy parallel corpus is given for training of translation models, it is better to get rid of noise as much as possible by using \"strict\" term weighting algorithms; when a less noisy parallel corpus is given for building the translation models, a tolerant approach would yield better retrieval performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Our work is most closely related to Jeon et al. (2005)'s work, which addresses the issue of word mismatch between queries and questions in large online Q&A collections by using translationbased methods. Apart from their work, there have been some related works on applying translationbased methods for retrieving FAQ data. Berger et al. (2000) report some of the earliest work on FAQ retrieval using statistical retrieval models, including translation-based approaches, with a small set of FAQ data. Soricut and Brill (2004) present an answer passage retrieval system that is trained from 1 million FAQs collected from the Web using translation methods. Riezler et al. (2007) demonstrate the advantages of translation-based approach to answer retrieval by utilizing a more complex translation model also trained from a large amount of data extracted from FAQs on the Web. Although all of these translation-based approaches are based on the statistical translation models, including the IBM Model 1, none of them focus on addressing the noise issues in translation models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 323, |
|
"end": 343, |
|
"text": "Berger et al. (2000)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 500, |
|
"end": 524, |
|
"text": "Soricut and Brill (2004)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 654, |
|
"end": 675, |
|
"text": "Riezler et al. (2007)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Bridging the query-question gap has been a major issue in retrieval models for large online Q&A collections. In this paper, we have shown that the performance of translation-based retrieval on real online Q&A collections can be significantly improved by using compact translation models of which the noise (unimportant word translations) is properly reduced. We have also observed that the performance enhancement may be achieved by choosing the appropriate strategies regarding the strictness of various term weighting algorithms and the expected noise level of the parallel data for learning such translation models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Future work will focus on testing the effectiveness of the proposed method on a larger set of Q&A collections with broader domains. Since the proposed approach cannot handle many-to-one or oneto-many word transformations, we also plan to investigate the effectiveness of phrase-based translation models in closing gaps between queries and questions for further enhancement of Q&A retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "http://answers.yahoo.com/ 2 http://qna.live.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The formulation of our retrieval model is basically equivalent to the approach of Jeon et al.(2005).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We have empirically set the threshold (0.05) for our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Details on this data will be introduced in the next section. 6 http://truereader.com/manuals/onix/stopwords1.html 7 http://www.fjoch.com/GIZA++.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Yahoo! Answers did not expose additional question pages to external requests at the time of collecting the data.9 When collecting parallel strings from the Q&A collection, we have put together the question title and the question content as one question string.10 The retrieval model using compact translation models has not been used in the pooling procedure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported by Microsoft Research Asia. Any opinions, findings, and conclusions or recommendations expressed above are those of the authors and do not necessarily reflect the views of the sponsor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Bridging the Lexical Chasm: Statistical Approaches to Answer-Finding", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Berger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 23rd Annual International ACM SI-GIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "192--199", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Berger, Rich Caruana, David Cohn, Dayne Fre- itag, and Vibhu Mittal. 2000. Bridging the Lexical Chasm: Statistical Approaches to Answer-Finding. In Proceedings of the 23rd Annual International ACM SI- GIR Conference on Research and Development in In- formation Retrieval, pages 192-199.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Information Retrieval as Statistical Translation", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Berger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of the 22nd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "222--229", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Berger and John Lafferty. 1999. Information Re- trieval as Statistical Translation. In Proceedings of the 22nd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 222-229.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The Mathematics of Statistical Machine Translation: Parameter Estimation. Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"Della" |
|
], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Della Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mercer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "19", |
|
"issue": "", |
|
"pages": "263--311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter F. Brown, Vincent J. Della Pietra, Stephen A. Della Pietra, and Robert L. Mercer. 1993. The Mathematics of Statistical Machine Translation: Parameter Estima- tion. Computational Linguistics, 19(2):263-311.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Evaluating Evaluation Measure Stability", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Buckley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellen", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Voorhees", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 23rd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Buckley and Ellen M. Voorhees. 2000. Evaluating Evaluation Measure Stability. In Proceedings of the 23rd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 33-40.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Finding Similar Questions in Large Question and Answer Archives", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"Bruce" |
|
], |
|
"last": "Jiwoon Jeon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joon Ho", |
|
"middle": [], |
|
"last": "Croft", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 14th ACM International Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "84--90", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwoon Jeon, W. Bruce Croft, and Joon Ho Lee. 2005. Finding Similar Questions in Large Question and An- swer Archives. In Proceedings of the 14th ACM Inter- national Conference on Information and Knowledge Management, pages 84-90.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Introduction to Information Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prabhakar", |
|
"middle": [], |
|
"last": "Raghavan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D. Manning, Prabhakar Raghavan, and Hin- rich Sch\u00fctze. 2008. Introduction to Information Re- trieval. Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "TextRank: Bringing Order into Text", |
|
"authors": [ |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Tarau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "404--411", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rada Mihalcea and Paul Tarau. 2004. TextRank: Bring- ing Order into Text. In Proceedings of the 2004 Con- ference on Empirical Methods in Natural Language Processing, pages 404-411.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A Systematic Comparison of Various Statistical Alignment Models", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Franz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational Linguistics", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "19--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz J. Och and Hermann Ney. 2003. A Systematic Comparison of Various Statistical Alignment Models. Computational Linguistics, 29(1):19-51.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A Language Modeling Approach to Information Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Jay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"Bruce" |
|
], |
|
"last": "Ponte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 21st Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "275--281", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jay M. Ponte and W. Bruce Croft. 1998. A Language Modeling Approach to Information Retrieval. In Pro- ceedings of the 21st Annual International ACM SIGIR Conference on Research and Development in Informa- tion Retrieval, pages 275-281.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Statistical Machine Translation for Query Expansion in Answer Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Riezler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Vasserman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Tsochantaridis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vibhu", |
|
"middle": [], |
|
"last": "Mittal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "464--471", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Riezler, Alexander Vasserman, Ioannis Tsochan- taridis, Vibhu Mittal, and Yi Liu. 2007. Statistical Machine Translation for Query Expansion in Answer Retrieval. In Proceedings of the 45th Annual Meet- ing of the Association for Computational Linguistics, pages 464-471.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Automatic Question Answering: Beyond the Factoid", |
|
"authors": [ |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Soricut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Brill", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 2004 Human Language Technology and Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "57--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Radu Soricut and Eric Brill. 2004. Automatic Question Answering: Beyond the Factoid. In Proceedings of the 2004 Human Language Technology and Confer- ence of the North American Chapter of the Association for Computational Linguistics, pages 57-64.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Term weighting results of tf-idf and TextRank (window=3). Weighting is done on underlined words only.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"text": "Impact of various word elimination strategies on translation model construction using (Q A) corpus.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Corpus: (Q Q)</td><td colspan=\"2\">Vocabulary Size (%chg)</td><td colspan=\"2\">Average Translations (%chg)</td></tr><tr><td/><td>tf-idf</td><td>TextRank</td><td>tf-idf</td><td>TextRank</td></tr><tr><td>Initial</td><td>34,485</td><td/><td>442</td></tr><tr><td>25%Removal</td><td colspan=\"2\">34,374 (\u22070.3%) 26,900 (\u220722.0%)</td><td colspan=\"2\">437 (\u22071.1%) 282 (\u220736.2%)</td></tr><tr><td>50%Removal</td><td colspan=\"2\">34,262 (\u22070.6%) 26,421 (\u220723.4%)</td><td colspan=\"2\">423 (\u22074.3%) 274 (\u220738.0%)</td></tr><tr><td>75%Removal</td><td colspan=\"4\">32,813 (\u22074.8%) 23,354 (\u220732.3%) 288 (\u220734.8%) 213 (\u220751.8%)</td></tr><tr><td>Avg.Score</td><td colspan=\"4\">28,613 (\u220717.0%) 16,492 (\u220752.2%) 163 (\u220763.1%) 164 (\u220762.9%)</td></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"text": "", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"text": "Preliminary retrieval results.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Model</td><td>MAP</td><td>R-Prec</td></tr><tr><td/><td>(%chg)</td><td>(%chg)</td></tr><tr><td>QLM</td><td>0.1031</td><td>0.2396</td></tr><tr><td>TLM(Q Q)*</td><td>0.1121</td><td>0.2251</td></tr><tr><td/><td>( 9%)</td><td>(\u22076%)</td></tr><tr><td>CTLM(Q Q)</td><td>0.1415</td><td>0.2425</td></tr><tr><td/><td>( 37%)</td><td>( 1%)</td></tr><tr><td>TLM(Q A)</td><td>0.1935</td><td>0.3135</td></tr><tr><td/><td colspan=\"2\">( 88%) ( 31%)</td></tr><tr><td>CTLM(Q A)</td><td>0.2095</td><td>0.3585</td></tr><tr><td/><td colspan=\"2\">( 103%) ( 50%)</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"text": "Comparisons with three baseline retrieval models. * indicates that it is equivalent to Jeon et al.(2005)'s approach. MAP improvements of CTLMs have been tested to be statistically significant using paired t-test.", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
}, |
|
"TABREF7": { |
|
"text": "Contributions of various word elimination strategies on MAP performance of CTLM(Q Q).", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>(Q A)</td><td colspan=\"2\">MAP (%chg)</td></tr><tr><td/><td>tf-idf</td><td>TextRank</td></tr><tr><td>Initial</td><td>0.1935</td></tr><tr><td colspan=\"3\">25%Rmv 0.2095 ( 8.3) 0.1733 (\u220710.4)</td></tr><tr><td colspan=\"3\">50%Rmv 0.2085 ( 7.8) 0.1623 (\u220716.1)</td></tr><tr><td colspan=\"3\">75%Rmv 0.1449 (\u220725.1) 0.1515 (\u220721.7)</td></tr><tr><td colspan=\"3\">Avg.Score 0.1168 (\u220739.6) 0.1124 (\u220741.9)</td></tr></table>" |
|
}, |
|
"TABREF8": { |
|
"text": "Contributions of various word elimination strategies on MAP performance of CTLM(Q A).", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |