|
{ |
|
"paper_id": "C14-1010", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:22:20.031060Z" |
|
}, |
|
"title": "Group Non-negative Matrix Factorization with Natural Categories for Question Retrieval in Community Question Answer Archives", |
|
"authors": [ |
|
{ |
|
"first": "Guangyou", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": { |
|
"addrLine": "95 Zhongguancun East Road", |
|
"postCode": "100190", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yubo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": { |
|
"addrLine": "95 Zhongguancun East Road", |
|
"postCode": "100190", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Daojian", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": { |
|
"addrLine": "95 Zhongguancun East Road", |
|
"postCode": "100190", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": { |
|
"addrLine": "95 Zhongguancun East Road", |
|
"postCode": "100190", |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Community question answering (CQA) has become an important service due to the popularity of CQA archives on the web. A distinctive feature is that CQA services usually organize questions into a hierarchy of natural categories. In this paper, we focus on the problem of question retrieval and propose a novel approach, called group non-negative matrix factorization with natural categories (GNMFNC). This is achieved by learning the category-specific topics for each category as well as shared topics across all categories via a group non-negative matrix factorization framework. We derive an efficient algorithm for learning the factorization, analyze its complexity, and provide proof of convergence. Experiments are carried out on a real world CQA data set from Yahoo! Answers. The results show that our proposed approach significantly outperforms various baseline methods and achieves the state-of-the-art performance for question retrieval.", |
|
"pdf_parse": { |
|
"paper_id": "C14-1010", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Community question answering (CQA) has become an important service due to the popularity of CQA archives on the web. A distinctive feature is that CQA services usually organize questions into a hierarchy of natural categories. In this paper, we focus on the problem of question retrieval and propose a novel approach, called group non-negative matrix factorization with natural categories (GNMFNC). This is achieved by learning the category-specific topics for each category as well as shared topics across all categories via a group non-negative matrix factorization framework. We derive an efficient algorithm for learning the factorization, analyze its complexity, and provide proof of convergence. Experiments are carried out on a real world CQA data set from Yahoo! Answers. The results show that our proposed approach significantly outperforms various baseline methods and achieves the state-of-the-art performance for question retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Community question answering (CQA) such as Yahoo! Answers 1 and Quora 2 , has become an important service due to the popularity of CQA archives on the web. To make use of the large-scale questions and their answers, it is critical to have functionality of helping users to retrieve previous answers (Duan et al., 2008) . Typically, such functionality is achieved by first retrieving the historical questions that best match a user's queried question, and then using answers of these returned questions to answer the queried question. This is what we called question retrieval in this paper.", |
|
"cite_spans": [ |
|
{ |
|
"start": 299, |
|
"end": 318, |
|
"text": "(Duan et al., 2008)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The major challenge for question retrieval, as for most information retrieval tasks, is the lexical gap between the queried questions and the historical questions in the archives. For example, if a queried question contains the word \"company\" but a relevant historical question instead contains the word \"firm\", then there is a mismatch and the historical question may not be easily distinguished from an irrelevant one. To solve the lexical gap problem, most researchers focused on translation-based approaches since the relationships between words (or phrases) can be explicitly modeled through word-to-word (or phrases) translation probabilities (Jeon et al., 2005; Riezler et al., 2007; Xue et al., 2008; Lee et al., 2008; Bernhard and Gurevych, 2009; Singh, 2012) . However, these existing methods model the relevance ranking without considering the category-specific and shared topics with natural categories, it is not clear whether this information is useful for question retrieval.", |
|
"cite_spans": [ |
|
{ |
|
"start": 649, |
|
"end": 668, |
|
"text": "(Jeon et al., 2005;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 669, |
|
"end": 690, |
|
"text": "Riezler et al., 2007;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 691, |
|
"end": 708, |
|
"text": "Xue et al., 2008;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 709, |
|
"end": 726, |
|
"text": "Lee et al., 2008;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 727, |
|
"end": 755, |
|
"text": "Bernhard and Gurevych, 2009;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 756, |
|
"end": 768, |
|
"text": "Singh, 2012)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A distinctive feature of question-answer pairs in CQA is that CQA services usually organize questions into a hierarchy of natural categories. For example, Yahoo! Answers contains a hierarchy of 26 categories at the first level and more than 1262 subcategories at the leaf level. When a user asks a question, the user is typically required to choose a category label for the question from a predefined hierarchy. Questions in the predefined hierarchy usually share certain generic topics while questions in different categories have their specific topics. For example, questions in categories \"Arts & Humanities\" and \"Beauty & Style\" may share the generic topic of \"dance\" but they also have the category-specific topics of \"poem\" and \"wearing\", respectively. Inspired by the above observation, we propose a novel approach, called group non-negative matrix factorization with natural categories (GNMFNC). GNMFNC assumes that there exists a set of categoryspecific topics for each of the category, and there also exists a set of shared topics for all of the categories. Each question in CQA is specified by its category label, category-specific topics, as well as shared topics. In this way, the large-scale question retrieval problem can be decomposed into small-scale subproblems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In GNMFNC, questions in each category are represented as a term-question matrix. The term-question matrix is then approximated as the product of two matrices: one matrix represents the category-specific topics as well as the shared topics, and the other matrix denotes the question representation based on topics. An objective function is defined to measure the goodness of prediction of the data with the model. Optimization of the objective function leads to the automatic discovery of topics as well as the topic representation of questions. Finally, we calculate the relevance ranking between the queried questions and the historical questions in the latent topic space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Past studies by (Cao et al., 2009; Cao et al., 2010; Ming et al., 2010; Ji et al., 2012; confirmed a significant retrieval improvement by adding the natural categories into various existing retrieval models. However, all these previous work regarded natural categories individually without considering the relationships among them. On the contrary, this paper can effectively capture the relationships between the shared aspects and the category-specific individual aspects with natural categories via a group non-negative matrix factorization framework. Also, our work models the relevance ranking in the latent topic space rather than using the existing retrieval models. To date, no attempts have been made regarding group non-negative matrix factorization in studies of question retrieval, which remains an under-explored area.", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 34, |
|
"text": "(Cao et al., 2009;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 35, |
|
"end": 52, |
|
"text": "Cao et al., 2010;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 53, |
|
"end": 71, |
|
"text": "Ming et al., 2010;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 72, |
|
"end": 88, |
|
"text": "Ji et al., 2012;", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The remainder of this paper is organized as follows. Section 2 describes our proposed group nonnegative matrix factorization with natural categories for question retrieval. Section 3 presents the experimental results. In Section 4, we conclude with ideas for future research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Group Non-negative Matrix Factorization with Natural Categories", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In CQA, all questions are usually organized into a hierarchy of categories. When a user asks a question, the user is typically required to choose a category label for the question from a predefined hierarchy of categories. Hence, each question in CQA has a category label. Suppose that we are given a question collection D in CQA archive with size N , containing terms from a vocabulary V with size M . A question d is represented as a vector d \u2208 R M where each entry denotes the weight of the corresponding term, for example tf-idf is used in this paper. Let C = {c 1 , c 2 , \u2022 \u2022 \u2022 , c P } denote the set of categories (subcategories) of question collection D, where P is the number of categories (subcategories). The question collection D is organized into P groups according to their category labels and can be represented as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "D = {D 1 , D 2 , \u2022 \u2022 \u2022 , D P }. D p = {d (p) 1 , \u2022 \u2022 \u2022 , d (p)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Np } \u2208 R M \u00d7Np is the term-question matrix corresponding to category c p , in which each row stands for a term and each column stands for a question. N p is the number of questions in category c p such that Ks+Kp) be the term-topic matrix corresponding to category c p , where K s is the number of shared topics, K p is the number of category-specific topics corresponding to category c p , and p \u2208 [1, P ]. Term-topic matrix U s can be represented as", |
|
"cite_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 213, |
|
"text": "Ks+Kp)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "\u2211 P p=1 N p = N . Let U \u2032 p = [U s , U p ] \u2208 R M \u00d7(", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "U s = [u (s) 1 , \u2022 \u2022 \u2022 , u (s)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Ks ] \u2208 R M \u00d7Ks , in which each column corresponds to a shared topic. While the term-topic matrix U p can be represented as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "U p = [u (p) 1 , \u2022 \u2022 \u2022 , u (p) Kp ] \u2208 R M \u00d7Kp . The total number of topics in the question collection D is K = K s + P K p . Let V p = [v (p) 1 , \u2022 \u2022 \u2022 , v (p)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Np ] \u2208 R (Ks+Kp)\u00d7Np be the topic-question matrix corresponding to category c p , in which each column denotes the question representation in the topic space. We also denote ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "V T p = [H T p , W T p ],", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "O = P \u2211 p=1 { \u03bbp Dp \u2212 [Us, Up]Vp 2 F + R(Us, Up) } (1) where \u03bb p = \u2225D p \u2225 \u22122 F . R(U s , U p )",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "is a regularization term used to penalize the \"similarity\" between the shared topics and category-specific topics through U s and U p .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In this paper, we aim to ensure that matrix U s captures only shared topics and matrix U p captures only the category-specific topics. For example, if matrices U s and U p are mutually orthogonal, we have U T s U p = 0. To impose this constraint, we attempt to minimize the sum-of-squares of entries of the matrix", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "U T s U p (e.g., \u2225U T s U p \u2225 2 F which uniformly optimizes each entry of U T s U p ). With this choice, the regularization term of R(U s , U p ) is given by R(Us, Up) = P \u2211 p=1 \u03b1p U T s Up 2 F + P \u2211 l=1,l\u0338 =p \u03b2 l U T p U l 2 F (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where \u03b1 p and \u03b2 l are the regularization parameters, \u2200p \u2208", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "[1, P ], \u2200l \u2208 [1, P ].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Learning the objective function in equation (1) involves the following optimization problem:",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "min Us,Up,Vp\u22650 L = O + \u03c31 U T s 1M \u2212 1K s 2 F + \u03c32 U T p 1M \u2212 1K p 2 F + \u03c33 Vp1N p \u2212 1K s +Kp 2 F", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where \u03c3 1 , \u03c3 2 and \u03c3 3 are the shrinkage regularization parameters. Based on the shrinkage methodology, we can approximately satisfy the normalization constraints for each column of [U s , U p ] and V T p by guaranteeing the optimization converges to a stationary point.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We present the solution to the GNMFNC optimization problem in equation (3) as the following theorem. The theoretical aspects of the optimization are presented in the next subsection.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Algorithm", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Theorem 2.1. Updating U s , U p and V p using equations (4)\u223c(6) corresponding to category c p will monotonically decrease the objective function in equation (3) until convergence.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Algorithm", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Us \u2190 Us \u2022 [ \u2211 P p=1 \u03bbpDpH T p ] [ \u2211 P p=1 \u03bbp[Us, Up]VpH T p + \u03b1pUpU T p Us ] (4) Up \u2190 Up \u2022 [ \u03bbpDpW T p ] [ \u03bbp[Us, Up]VpW T p + \u03b1pUsU T s Up + \u2211 P l=1,l\u0338 =p \u03b2 l U l U T l Up ] (5) Vp \u2190 Vp \u2022 [ \u03bbpD T p [Us, Up] ] [ \u03bbpV T p [Us, Up] T [Us, Up] ]", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Learning Algorithm", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where operator \u2022 is element-wise product and [\u2022] [\u2022] is element-wise division. Based on Theorem 2.1, we note that multiplicative update rules given by equations (4)\u223c(6) are obtained by extending the updates of standard NMF (Lee and Seung, 2001) . A number of techniques can be used here to optimize the objective function in equation 3, such as alternating least squares (Kim and Park, 2008) , the active set method (Kim and Park, 2008) , and the projected gradients approach (Lin, 2007) . Nonetheless, the multiplicative updates derived in this paper have reasonably fast convergence behavior as shown empirically in the experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 45, |
|
"end": 48, |
|
"text": "[\u2022]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 244, |
|
"text": "(Lee and Seung, 2001)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 391, |
|
"text": "(Kim and Park, 2008)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 416, |
|
"end": 436, |
|
"text": "(Kim and Park, 2008)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 487, |
|
"text": "(Lin, 2007)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Algorithm", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In this subsection, we give the theoretical analysis of the optimization, convergence and computational complexity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Theoretical Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Without loss of generality, we only show the optimization of U s and formulate the Lagrange function with constraints as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Theoretical Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L(U s ) = O + \u03c3 1 U T s 1 M \u2212 1 Ks 2 F + Tr(\u03a8 s U T s )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Theoretical Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where Tr(\u2022) denotes the trace of a matrix, \u03a8 s \u2208 R Ks\u00d7Ks is the Lagrange multiplier for the nonnegative constraint", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Theoretical Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "U s \u2265 0. The partial derivative of L(U s ) w.r.t. U s is \u25bd Us L(U s ) = \u22122 P \u2211 p=1 \u03bb p D p H T p + 2 P \u2211 p=1 \u03bb p [U s , U p ]V p H T p + 2 P \u2211 p=1 \u03b1 p U p U T p U s + 2\u03c3 1 U s \u2212 2\u03c3 1 + \u03a8 s", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Theoretical Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Using the Karush-Kuhn-Tucker (KKT) (Boyd and Vandenberghe, 2004) condition", |
|
"cite_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 64, |
|
"text": "(Boyd and Vandenberghe, 2004)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Theoretical Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03a8 s \u2022 U s = 0, we obtain \u25bd Us L(U s ) \u2022 U s = { \u2212 \u2211 P p=1 \u03bb p D p H T p + \u2211 P p=1 \u03bb p [U s , U p ]V p H T p + \u2211 P p=1 \u03b1 p U p U T p U s + \u03c3 1 U s \u2212 \u03c3 1 } \u2022 U s = 0", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Theoretical Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "After normalization of U s , the terms \u03c3 1 U s and \u03c3 1 are in fact equal. They can be safely ignored from the above formula without influencing convergence. This leads to the updating rule for U s in equation 4. Following the similar derivations as shown above, we can obtain the updating rules for the rest variables U p and V p in GNMFNC optimization, as shown in equations (5) and (6).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Theoretical Analysis", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In this subsection, we prove the convergence of multiplicative updates given by equations (4)\u223c(6). We first introduce the definition of auxiliary function as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "Definition 2.1. F(X, X \u2032 ) is an auxiliary function for L(X) if L(X) \u2264 F(X, X \u2032 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "and equality holds if and only if L(X) = F(X, X).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "Lemma 2.1. (Lee and Seung, 2001) If F is an auxiliary function for L, L is non-increasing under the update", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 32, |
|
"text": "(Lee and Seung, 2001)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "X (t+1) = arg min X F(X, X (t) ) Proof. By Definition 2.1, L(X (t+1) ) \u2264 F(X (t+1) , X (t) ) \u2264 F(X (t) , X (t) ) = L(X (t) ) Theorem 2.2. Let L(U (t+1) s ) denote the sum of all terms in L that contain U (t+1) s , the following function is an auxiliary function for L(U (t+1) s ) F(U (t+1) s , U (t) s ) = L(U (t) s ) + (U (t+1) s \u2212 U (t) s ) \u25bd U (t) s L(U (t) s ) + 1 2 (U (t+1) s \u2212 U (t) s ) 2 P(U (t) s ) (10) P(U (t) s ) = \u2211 ij [ \u2211 P p=1 \u03bbp[U (t) s , Up]VpW T p + \u03b1pUpU T p U (t) s + \u03c31U (t) s ] ij \u2211 ij [U (t) s ]ij", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "\u25bd U (t) s L(U (t) s ) is the first-order derivative of L(U (t) s ) with respect to U (t)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "s . Theorem 2.2 can be proved similarly to (Lee and Seung, 2001) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 64, |
|
"text": "(Lee and Seung, 2001)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "by validating L(U (t+1) s ) \u2264 F (U (t+1) s , U (t) s ), L(U (t+1) s ) = F(U (t+1) s , U (t+1) s )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": ", and the Hessian matrix", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "\u25bd \u25bd U (t+1) s F(U (t+1) s , U (t)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "s ) \u227d 0. Due to limited space, we omit the details of the validation. addition multiplication division overall GNMFNC: ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "Us P (3M NpKs + M NpKp + M K 2 s ) P (3M NpKs + M NpKp + M K 2 s ) M Ks O(P M NpKmax) GNMFNC: Up 3M NpKp + M NpKs + P M 2 K \u2032 3M NpKp + M NpKs + P M 2 K \u2032 M Kp O(P M RK \u2032 ) GNMFNC: Vp 3M NpK \u2032 3M NpK \u2032 NpK \u2032 O(M NpK \u2032 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "(t+1) s , U (t) s ) with respect to U (t+1) s . When setting \u25bd U (t+1) s F(U (t+1) s , U (t)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "s ) = 0, we get the following updating rule", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "U (t+1) s \u2190 U (t) s \u2022 [ \u2211 P p=1 \u03bbpDpH T p + \u03c31 ] [ \u2211 P p=1 \u03bbp[U (t) s , Up]VpW T p + \u03b1pUpU T p U (t) s + \u03c31U (t) s ]", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "which is consistent with the updating rule derived from the KKT conditions aforementioned. By Lemma 2.1 and Theorem 2.2, we have L(U", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "(0) s ) = F(U (0) s , U (0) s ) \u2265 F(U (1) s , U (0) s ) \u2265 F(U (1) s , U (1) s ) = L(U (1) s ) \u2265 \u2022 \u2022 \u2022 \u2265 L(U (Iter) s ),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "where Iter is the number of iterations. Therefore, U s is monotonically decreasing. Since the objective function L is lower bounded by 0, the correctness and convergence of Theorem 2.1 is validated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convergence Analysis", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "In this subsection, we discuss the time computational complexity of the proposed algorithm GNMFNC. Besides expressing the complexity of the algorithm using big O notation, we also count the number of arithmetic operations to provide more details about running time. We show the results in Table 1 , where", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 296, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Computational Complexity", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "K max = max{K s , K p }, K \u2032 = K s + K p and R = max{M, N p }.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computational Complexity", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "Suppose the multiplicative updates stop after Iter iterations, the time cost of multiplicative updates then becomes O(Iter \u00d7 P M RK \u2032 ). We set Iter = 100 empirically in the rest of the paper. Therefore, the overall running time of GNMFNC is linear with respect to the size of word vocabulary, the number of questions and categories.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computational Complexity", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "The motivation of incorporating matrix factorization into relevance ranking is to learn the word relationships and reduce the \"lexical gap\" (Zhou et al., 2013a) . To do so, given a queried question q with category label c p from Yahoo! Answers, we first represent it in the latent topic space as", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 160, |
|
"text": "(Zhou et al., 2013a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relevance Ranking", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "v q , v q = arg min v\u22650 \u2225q \u2212 [U s , U p ]v\u2225 2 2 (12)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relevance Ranking", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "where vector q is the tf-idf representation of queried question q in the term space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relevance Ranking", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "For each historical question d (indexed by r) in question collection D, with representation v d = r-th column of V, we compute its similarity with queried question v q as following", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relevance Ranking", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s topic (q, d) = < v q , v d > \u2225v q \u2225 2 \u2022 \u2225v d \u2225 2", |
|
"eq_num": "(13)" |
|
} |
|
], |
|
"section": "Relevance Ranking", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The latent topic space score s topic (q, d) is combined with the conventional term matching score s term (q, d) for final relevance ranking. There are several ways to conduct the combination. Linear combination is a simple and effective way. The final relevance ranking score s(q, d) is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relevance Ranking", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s(q, d) = \u03b3s topic (q, d) + (1 \u2212 \u03b3)s term (q, d)", |
|
"eq_num": "(14)" |
|
} |
|
], |
|
"section": "Relevance Ranking", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "where \u03b3 \u2208 [0, 1] is the parameter which controls the relative importance of the latent topic space score and term matching score. s term (q, d) can be calculated with any of the conventional relevance models such as BM25 (Robertson et al., 1994) and LM (Zhai and Lafferty, 2001).",
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 245, |
|
"text": "(Robertson et al., 1994)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 277, |
|
"text": "(Zhai and Lafferty, 2001", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Relevance Ranking", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We collect the data set from Yahoo! Answers and use the getByCategory function provided in Yahoo! Answers API 3 to obtain CQA threads from the Yahoo! site. More specifically, we utilize the resolved questions and the resulting question repository that we use for question retrieval contains 2,288,607 questions. Each resolved question consists of four parts: \"question title\", \"question description\", \"question answers\" and \"question category\". We only use the \"question title\" and \"question category\" parts, which have been widely used in the literature for question retrieval (Cao et al., 2009; Cao et al., 2010) . There are 26 first-level categories in the predefined natural hierarchy, i.e., each historical question is categorized into one of the 26 categories. The categories include \"Arts & Humanities\", \"Beauty & Style\", \"Business & Finance\", etc.", |
|
"cite_spans": [ |
|
{ |
|
"start": 578, |
|
"end": 596, |
|
"text": "(Cao et al., 2009;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 597, |
|
"end": 614, |
|
"text": "Cao et al., 2010)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set and Evaluation Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In order to evaluate our approach, we randomly select 2,000 questions as queried questions from the above data collection to construct the validation/test sets, and the remaining data collection as training set. Note that we select the queried questions in proportion to the number of questions and categories against the whole distribution to have a better control over a possible imbalance. To obtain the groundtruth, we employ the Vector Space Model (VSM) (Salton et al., 1975) to retrieve the top 10 results and obtain manual judgements. The top 10 results don't include the queried question itself. Given a returned result by VSM, an annotator is asked to label it with \"relevant\" or \"irrelevant\". If a returned result is considered semantically equivalent to the queried question, the annotator will label it as \"relevant\"; otherwise, the annotator will label it as \"irrelevant\". Two annotators are involved in the annotation process. If a conflict happens, a third person will make judgement for the final result. In the process of manually judging questions, the annotators are presented only the questions. As a result, there are in total 20,000 judged question pairs. We randomly split the 2,000 queried questions into validation/test sets, each has 1,000/1,000 queried questions. We use the validation set for parameter tuning and the test set for evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 459, |
|
"end": 480, |
|
"text": "(Salton et al., 1975)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set and Evaluation Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Evaluation Metrics: We evaluate the performance of question retrieval using the following metrics: Mean Average Precision (MAP) and Precision@N (P@N). MAP rewards methods that return relevant questions early and also rewards correct ranking of the results. P@N reports the fraction of the top-N questions retrieved that are relevant. We perform a significant test, i.e., a t-test with a default significant level of 0.05.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set and Evaluation Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "There are several parameters used in the paper, we tune these parameters on the validation set. Specifically, we set the number of category-specific topics per category and the number of shared topics in GNMFNC as (K s , K p ) = {(5, 2), (10, 4), (20, 8), (40, 16), (80, 32)}, resulting in K = {57, 114, 228, 456, 912} total number of topics. (Note that the total number of topics in GNMFNC is K s + 26 \u00d7 K p , where 26 is the number of categories in the first-level predefined natural hierarchy 4 ). Finally, we set (K s , K p ) = (20, 8) and K = 228 empirically as this setting yields the best performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set and Evaluation Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "For regularization parameters \u03b1 p and \u03b2 l , it is difficult to directly tune on the validation set, we present an alternative way by adding a common factor a to look at the objective function of optimization problem in equation 3 on the training data. In other words, we set \u03b1 p = a Ks\u00d7Kp and \u03b2 l = a Kp\u00d7K l . Therefore, we tune the parameters \u03b1 p and \u03b2 l by alternatively adjusting the common factor a via grid search. As a result, we set a = 100, resulting in \u03b1 p = \u03b2 l = 0.625 in the following experiments. The trade-off parameter \u03b3 in the linear combination is set from 0 to 1 in steps of 0.1 for all methods. We set \u03b3 = 0.6 empirically. For shrinkage regularization parameters, we empirically set \u03c3 1 = \u03c3 2 = \u03c3 3 = 1.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Set and Evaluation Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In this experiment, we present the experimental results for question retrieval on the test data set. Specifically, for our proposed GNMFNC, we combine the latent topic matching scores with the term matching scores given by BM25 and LM, denoted as \"BM25+GNMFNC\" and \"LM+GNMFNC\". Table 2 shows the main retrieval performances under the evaluation metrics MAP, P@1 and P@10. Row 1 and row 2 are the baseline systems, which model the relevance ranking using BM25 (Robertson et al., 1994) and language model (LM) (Zhai and Lafferty, 2001) in the term space. Row 3 is word-based translation model (Jeon et al., 2005) , and row 4 is word-based translation language model (TRLM) (Xue et al., 2008) . Row 5 is phrase-based translation model , and row 6 is the entity-based translation model (Singh, 2012) . Row 7 to row 11 explore the natural categories for question retrieval. In row 7, Cao et al. (2010) employed the natural categories to compute the local and global relevance with different model combination, here we use the combination VSM + TRLM for comparison because this combination obtains the superior performance than others. In row 8, proposed a category-enhanced TRLM for question retrieval. There are some clear trends in the results of Table 2: (1) BM25+GNMFNC and LM+GNMFNC perform significantly better than BM25 and LM respectively (t-test, p-value < 0.05, row 1 vs. row 9; row 2 vs. row 10), indicating the effective of GNMFNC.", |
|
"cite_spans": [ |
|
{ |
|
"start": 459, |
|
"end": 483, |
|
"text": "(Robertson et al., 1994)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 508, |
|
"end": 533, |
|
"text": "(Zhai and Lafferty, 2001)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 591, |
|
"end": 610, |
|
"text": "(Jeon et al., 2005)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 671, |
|
"end": 689, |
|
"text": "(Xue et al., 2008)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 782, |
|
"end": 795, |
|
"text": "(Singh, 2012)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 879, |
|
"end": 896, |
|
"text": "Cao et al. (2010)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 278, |
|
"end": 285, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1244, |
|
"end": 1252, |
|
"text": "Table 2:", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Question Retrieval Results", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(2) BM25+GNMFNC and LM+GNMFNC perform better than translation methods, some improvements are statistically significant (t-test, p-value < 0.05, row 3 and row 4 vs. row 9 and row 10). The reason may be that GNMFNC models the relevance ranking in the latent topic space, which can also effectively solve the lexical gap problem.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Retrieval Results", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(3) Capturing the shared aspects and the category-specific individual aspects with natural categories in the group modeling framework can significantly improve the performance of question retrieval (t-test, p-value < 0.05, row 7 and row 8 vs. row 9 and row 10).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Retrieval Results", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(4) Natural categories are useful and effective for question retrieval, whether in the group modeling framework or in existing retrieval models (row 3\u223c row 6 vs. row 7\u223crow 10).",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Retrieval Results", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We note that our proposed GNMFNC is related to non-negative matrix factorization (NMF) (Lee and Seung, 2001 ) and its variants, we introduce three baselines. The first baseline is NMF, which is trained on the whole training data. The second baseline is CNMF, which is trained on each category without considering the shared topics. The third baseline is GNMF (Lee and Choi, 2009; , which is similar to our GNMFNC but there are no constraints on the category-specific topics to prevent them from capturing the information from the shared topics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 107, |
|
"text": "(Lee and Seung, 2001", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 379, |
|
"text": "(Lee and Choi, 2009;", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison of Matrix Factorizations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "NMF and GNMF are trained on the training data with the same parameter settings in section 4.1 for fair comparison. For CNMF, we also train the model on the training data with the same parameter settings in section 4.1, except parameter K s , as there exists no shared topics in CNMF. Table 3 shows the question retrieval performance of NMF families on the test set, obtained with the best parameter settings determined by the validation set. From the results, we draw the following observations:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 284, |
|
"end": 291, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison of Matrix Factorizations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(1) All of these methods can significantly improve the performance in comparison to the baseline BM25 and LM (t-test, p-value < 0.05).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison of Matrix Factorizations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(2) GNMF and GNMFNC perform significantly better than NMF and CNMF respectively (t-test, p-value < 0.05), indicating the effectiveness of the group matrix factorization framework, especially the use of shared topics. (3) GNMFNC performs significantly better than GNMF (t-test, p-value < 0.05, row 4 vs. row 5; row 9 vs. row 10), indicating the effectiveness of the regularization term on the category-specific topics to prevent them from capturing the information from the shared topics.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison of Matrix Factorizations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "From the experimental results reported above, we can conclude that our proposed GNMFNC is useful for question retrieval with high accuracy. To the best of our knowledge, this is the first time that group matrix factorization has been investigated for question retrieval.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison of Matrix Factorizations", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In subsection 2.3.1, we have shown that the multiplicative updates given by equations (4)\u223c(6) are convergent. Here, we empirically show the convergence behavior of GNMFNC. Figure 1 shows the convergence curve of GNMFNC on the training data set. From the figure, y-axis is the value of objective function and x-axis denotes the iteration number. We can see that the multiplicative updates for GNMFNC converge very fast, usually within 80 iterations.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 180, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Convergence Behavior", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "One success of this paper is to use regularized constrains on the category-specific topics to prevent them from capturing the information from the shared topics. It is necessary to give an in-depth analysis of the regularization parameters used in the paper. Consider the regularization term used in equation (2), each element in U T s U p and U T p U l has a value between 0 and 1 as each column of U s , U p and U l is normalized. Therefore, it is appropriate to normalize the term having \u2225U T s U p \u2225 2 F by K s K p since there are K s \u00d7 K p elements in U T s U p . Similarly, \u2225U T p U l \u2225 2 F is normalized by K l K p . Note that K l = K p and l \u0338 = p. As discussed in subsection 4.1, we present an alternative way by adding a common factor a and set \u03b1 p = a Ks\u00d7Kp and \u03b2 l = a Kp\u00d7K l . The common factor a is used to adjust a trade-off between the matrix factorization errors and the mutual orthogonality, which cannot directly tune on the validation set. Thus, we look at the objective function of optimization problem in equation 3on the training data and find the optimum value for a. Figure 2 shows the objective function value vs. common factor a, where y-axis denotes the converged objective function value, and x-axis denotes Log 10 a . We can see that the optimum value of a is 100. Therefore, the common factor a can be fixed at 100 for our data set used in the paper, resulting in \u03b1 p = \u03b2 l = 0.625. Note that the optimum value of (K s , K p ) are set as (20, 8) in subsection 4.1. Due to limited space, we do not give an in-depth analysis for other parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1092, |
|
"end": 1100, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Regularization Parameters Selection", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a novel approach, called group non-negative matrix factorization with natural categories (GNMFNC). The proposed method is achieved by learning the category-specific topics for each category as well as shared topics across all categories via a group non-negative matrix factorization framework. We derive an efficient algorithm for learning the factorization, analyze its complexity, and provide proof of convergence. Experiments show that our proposed approach significantly outperforms various baseline methods and achieves state-of-the-art performance for question retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "There are some ways in which this research could be continued. First, the optimization of GNMFNC can be decomposed into many sub-optimization problems, a natural avenue for future research is to reduce the running time by executing the optimization in a distributed computing environment (e.g., MapReduce (Dean et al., 2004) ). Second, another combination approach will be used to incorporate the latent topic match score as a feature in a learning to rank model, e.g., LambdaRank (Burges et al., 2007) . Third, we will try to investigate the use of the proposed approach for other kinds of data sets with larger categories, such as categorized documents from ODP project. 5 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 305, |
|
"end": 324, |
|
"text": "(Dean et al., 2004)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 481, |
|
"end": 502, |
|
"text": "(Burges et al., 2007)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 673, |
|
"end": 674, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "http://developer.yahoo.com/answers 4 Here we do not use the leaf categories because we find that it is not possible to run GNMFNC with such large number of topics on the current machines, and we will leave it for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported by the National Natural Science Foundation of China (No. 61333018 and No. 61303180), the Beijing Natural Science Foundation (No. 4144087), CCF Opening Project of Chinese Information Processing, and also Sponsored by CCF-Tencent Open Research Fund. We thank the anonymous reviewers for their insightful comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Combining lexical semantic resources with question & answer archives for translation-based answer finding", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Bernhard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "728--736", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Bernhard and I. Gurevych. 2009. Combining lexical semantic resources with question & answer archives for translation-based answer finding. In Proceedings of ACL, pages 728-736.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Convex Optimization", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Boyd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Vandenberghe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Boyd and L. Vandenberghe. 2004. Convex Optimization. Cambridge university press.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "SVD based initialization: a head start for nonnegative matrix factorization", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Boutsidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Gallopoulos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Pattern Recognition", |
|
"volume": "41", |
|
"issue": "4", |
|
"pages": "1350--1362", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Boutsidis and E. Gallopoulos. 2008. SVD based initialization: a head start for nonnegative matrix factorization. Pattern Recognition, 41(4):1350-1362.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Learning to rank with nonsmooth cost function", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Burges", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ragno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Burges, R. Ragno, and Q. Le. 2007. Learning to rank with nonsmooth cost function. In Proceedings of NIPS.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Learning the latent topics for question retrieval in community QA", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Cai, G. Zhou, K. Liu, and J. Zhao. 2011. Learning the latent topics for question retrieval in community QA. In Proceedings of IJCNLP.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The use of categorization information in language models for question retrieval", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Cong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Jensen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "265--274", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Cao, G. Cong, B. Cui, C. Jensen, and C. Zhang. 2009. The use of categorization information in language models for question retrieval. In Proceedings of CIKM, pages 265-274.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A generalized framework of exploring category information for question retrieval in community question answer archives", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Cong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Jensen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of WWW", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Cao, G. Cong, B. Cui, and C. Jensen. 2010. A generalized framework of exploring category information for question retrieval in community question answer archives. In Proceedings of WWW.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Mapreduce: simplified data processing on large clusters", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Ghemanwat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Inc", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of OSDI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Dean, S. Ghemanwat, and G. Inc. 2004. Mapreduce: simplified data processing on large clusters. In Proceed- ings of OSDI.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Searching questions by identifying questions topics and question focus", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "156--164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Duan, Y. Cao, C. Lin, and Y. Yu. 2008. Searching questions by identifying questions topics and question focus. In Proceedings of ACL, pages 156-164.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Finding similar questions in large question and answer archives", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Jeon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Croft", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "84--90", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Jeon, W. Croft, and J. Lee. 2005. Finding similar questions in large question and answer archives. In Proceed- ings of CIKM, pages 84-90.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A category-integrated language model for question retrieval in community question answering", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of AIRS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "14--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Z. Ji, F. Xu, and B. Wang. 2012. A category-integrated language model for question retrieval in community question answering. In Proceedings of AIRS, pages 14-25.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Non-negative matrix factorization based on alternating non-negativity constrained least squares and active set method", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "SIAM J Matrix Anal Appl", |
|
"volume": "30", |
|
"issue": "2", |
|
"pages": "713--730", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Kim and H. Park. 2008. Non-negative matrix factorization based on alternating non-negativity constrained least squares and active set method. SIAM J Matrix Anal Appl, 30(2):713-730.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Initializations for the nonnegative matrix factorization", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Langville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Meyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Albright", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Cox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Duling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of KDD", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Langville, C. Meyer, R. Albright, J. Cox, and D. Duling. 2006. Initializations for the nonnegative matrix factorization. In Proceedings of KDD.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Bridging lexical gaps between queries and questions on large online Q&A collections with compact translation models", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Rim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "410--418", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Lee, S. Kim, Y. Song, and H. Rim. 2008. Bridging lexical gaps between queries and questions on large online Q&A collections with compact translation models. In Proceedings of EMNLP, pages 410-418.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Algorithms for non-negative matrix factorization", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Seung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Lee and H. Seung. 2001. Algorithms for non-negative matrix factorization. In Proceedings of NIPS. 5 http://www.dmoz.org/", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Group nonnegative matrix factorization for eeg classification", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of AISTATS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "320--327", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Lee and S. Choi. 2009. Group nonnegative matrix factorization for eeg classification. In Proceedings of AISTATS, pages 320-327.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Projected gradient methods for nonnegative matrix factorization", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Neural Comput", |
|
"volume": "19", |
|
"issue": "10", |
|
"pages": "2756--2779", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Lin. 2007. Projected gradient methods for nonnegative matrix factorization. Neural Comput, 19(10):2756- 2779.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Exploring domain-specific term weight in archived question search", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Ming", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Chua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Cong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1605--1608", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Z. Ming, T. Chua, and G. Cong. 2010. Exploring domain-specific term weight in archived question search. In Proceedings of CIKM, pages 1605-1608.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Statistical machine translation for query expansion in answer retrieval", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Riezler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Vasserman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Tsochantaridis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Mittal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "464--471", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Riezler, A. Vasserman, I. Tsochantaridis, V. Mittal, and Y. Liu. 2007. Statistical machine translation for query expansion in answer retrieval. In Proceedings of ACL, pages 464-471.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Okapi at trec-3", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Robertson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Hancock-Beaulieu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Gatford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Proceedings of TREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "109--126", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Robertson, S. Walker, S. Jones, M. Hancock-Beaulieu, and M. Gatford. 1994. Okapi at trec-3. In Proceedings of TREC, pages 109-126.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A vector space model for automatic indexing", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Salton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Wong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1975, |
|
"venue": "Communications of the ACM", |
|
"volume": "18", |
|
"issue": "11", |
|
"pages": "613--620", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Salton, A. Wong, and C. Yang. 1975. A vector space model for automatic indexing. Communications of the ACM, 18(11):613-620.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Entity based q&a retrieval", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1266--1277", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Singh. 2012. Entity based q&a retrieval. In Proceedings of EMNLP-CoNLL, pages 1266-1277.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Group matrix factorization for scalable topic modeling",
|
"authors": [ |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Xun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Q. Wang, Z. Cao, J. Xun, and H. Li. 2012. Group matrix factorization for scalable topic modeling. In Proceedings of SIGIR.",
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Retrieval models for question and answer archives", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Jeon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "475--482", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Xue, J. Jeon, and W. Croft. 2008. Retrieval models for question and answer archives. In Proceedings of SIGIR, pages 475-482.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A study of smoothing methods for language models applied to ad hoc information retrieval",
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "334--342", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Zhai and J. Lafferty. 2001. A study of smoothing methods for language models applied to ad hoc information retrieval. In Proceedings of SIGIR, pages 334-342.",
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Phrase-based translation model for question retrieval in community question answer archives", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "653--662", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Zhou, L. Cai, J. Zhao, and K. Liu. 2011. Phrase-based translation model for question retrieval in community question answer archives. In Proceedings of ACL, pages 653-662.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Statistical machine translation improves question retrieval in community question answering via matrix factorization", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "852--861", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Zhou, F. Liu, Y. Liu, S. He, and J. Zhao. 2013. Statistical machine translation improves question retrieval in community question answering via matrix factorization. In Proceedings of ACL, pages 852-861.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Toward faster and better retrieval models for question search", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2139--2148", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Zhou, Y. Chen, D. Zeng, and J. Zhao. 2013. Toward faster and better retrieval models for question search. In Proceedings of CIKM, pages 2139-2148.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "Convergence curve of GNMFNC.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"text": "Objective function value vs. factor a.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"text": "where $H_p \\in \\mathbb{R}^{K_s \\times N_p}$ and $W_p \\in \\mathbb{R}^{K_p \\times N_p}$ correspond to the coefficients of shared topics $U_s$ and category-specific topics $U_p$, respectively. Thus, given a question collection $D = \\{D_1, D_2, \\cdots, D_P\\}$ together with the category labels $C = \\{c_1, c_2, \\cdots, c_P\\}$, our proposed GNMFNC amounts to modeling the question collection $D$ with $P$ groups simultaneously, arriving at the following objective function:",
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "Computational operation counts for each iteration in GNMFNC.", |
|
"num": null, |
|
"content": "<table><tr><td>Based on Theorem 2.2, we can fix U</td><td>(t)</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"text": "Comparison with different methods for question retrieval.", |
|
"num": null, |
|
"content": "<table><tr><td>#</td><td>Methods</td><td>MAP</td><td>P@10</td></tr><tr><td>1</td><td>BM25</td><td>0.243</td><td>0.225</td></tr><tr><td>2</td><td>LM</td><td>0.286</td><td>0.232</td></tr><tr><td>3</td><td>(Jeon et al., 2005)</td><td>0.327</td><td>0.235</td></tr><tr><td>4</td><td>(Xue et al., 2008)</td><td>0.341</td><td>0.238</td></tr><tr><td>5</td><td>(Zhou et al., 2011)</td><td>0.365</td><td>0.243</td></tr><tr><td>6</td><td>(Singh, 2012)</td><td>0.354</td><td>0.240</td></tr><tr><td>7</td><td>(Cao et al., 2010)</td><td>0.358</td><td>0.242</td></tr><tr><td>8</td><td>(Cai et al., 2011)</td><td>0.331</td><td>0.236</td></tr><tr><td>9</td><td>BM25+GNMFNC</td><td>0.369</td><td>0.248</td></tr><tr><td>10</td><td>LM+GNMFNC</td><td>0.374</td><td>0.251</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "Comparison of matrix factorizations for question retrieval.", |
|
"num": null, |
|
"content": "<table><tr><td>#</td><td>Methods</td><td>MAP</td><td>P@10</td></tr><tr><td>1</td><td>BM25</td><td>0.243</td><td>0.225</td></tr><tr><td>2</td><td>BM25+NMF</td><td>0.325</td><td>0.235</td></tr><tr><td>3</td><td>BM25+CNMF</td><td>0.344</td><td>0.239</td></tr><tr><td>4</td><td>BM25+GNMF</td><td>0.361</td><td>0.242</td></tr><tr><td>5</td><td>BM25+GNMFNC</td><td>0.369</td><td>0.248</td></tr><tr><td>6</td><td>LM</td><td>0.286</td><td>0.232</td></tr><tr><td>7</td><td>LM+NMF</td><td>0.337</td><td>0.237</td></tr><tr><td>8</td><td>LM+CNMF</td><td>0.352</td><td>0.240</td></tr><tr><td>9</td><td>LM+GNMF</td><td>0.365</td><td>0.243</td></tr><tr><td>10</td><td>LM+GNMFNC</td><td>0.374</td><td>0.251</td></tr></table>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |