|
{ |
|
"paper_id": "C18-1025", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:07:18.025196Z" |
|
}, |
|
"title": "Joint Learning from Labeled and Unlabeled Data for Information Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "China Normal University Wuhan", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ping", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "China Normal University Wuhan", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Le", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "China Normal University Wuhan", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Recently, a significant number of studies have focused on neural information retrieval (IR) models. One category of works use unlabeled data to train general word embeddings based on term proximity. The general embeddings can be integrated into traditional IR models. The other category employs labeled data (e.g. click-through data) to train end-to-end neural IR models consisting of layers for target-specific representation learning. The latter idea accounts better for the IR task and is favored by recent research works, which is the one we will follow in this paper. We hypothesize that general semantics learned from unlabeled data can complement task-specific representation learned from labeled data of limited quality, and that a combination of the two is favorable. To this end, we propose a learning framework which can benefit from both labeled and more abundant unlabeled data for representation learning in the context of IR. Through a joint learning fashion in a single neural framework, the learned representation is optimized to minimize both the supervised loss on query-document matching and the unsupervised loss on text reconstruction. Standard retrieval experiments on TREC collections indicate that the joint learning methodology leads to significant better performance of retrieval over several strong baselines for IR.", |
|
"pdf_parse": { |
|
"paper_id": "C18-1025", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Recently, a significant number of studies have focused on neural information retrieval (IR) models. One category of works use unlabeled data to train general word embeddings based on term proximity. The general embeddings can be integrated into traditional IR models. The other category employs labeled data (e.g. click-through data) to train end-to-end neural IR models consisting of layers for target-specific representation learning. The latter idea accounts better for the IR task and is favored by recent research works, which is the one we will follow in this paper. We hypothesize that general semantics learned from unlabeled data can complement task-specific representation learned from labeled data of limited quality, and that a combination of the two is favorable. To this end, we propose a learning framework which can benefit from both labeled and more abundant unlabeled data for representation learning in the context of IR. Through a joint learning fashion in a single neural framework, the learned representation is optimized to minimize both the supervised loss on query-document matching and the unsupervised loss on text reconstruction. Standard retrieval experiments on TREC collections indicate that the joint learning methodology leads to significant better performance of retrieval over several strong baselines for IR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In recent years, the research community has noticed the great success of neural networks in computer vision (Krizhevsky et al., 2012) , speech recognition and natural language processing (Mikolov et al., 2013) tasks. However, the potential of neural networks has not been fully investigated in the IR field. Although a significant number of studies (e.g. (Huang et al., 2013; Ganguly et al., 2015; Zheng and Callan, 2015; Guo et al., 2016; Zamani and Croft, 2016; Dehghani et al., 2017; ) try to apply neural networks in IR, there have been few studies reporting the performance that is comparable to state-of-the-art IR models. These approaches rely on the general idea that neural network can provide a low-dimensional and semantics-rich representation for both queries and documents. Such a representation can bridge lexical and semantic gaps in traditional IR models. Depending on if the embeddings are trained with discriminative information for IR tasks, existing works can be broadly divided into two categories (Zhang et al., 2016; .", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 133, |
|
"text": "(Krizhevsky et al., 2012)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 187, |
|
"end": 209, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 375, |
|
"text": "(Huang et al., 2013;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 397, |
|
"text": "Ganguly et al., 2015;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 421, |
|
"text": "Zheng and Callan, 2015;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 439, |
|
"text": "Guo et al., 2016;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 440, |
|
"end": 463, |
|
"text": "Zamani and Croft, 2016;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 464, |
|
"end": 486, |
|
"text": "Dehghani et al., 2017;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1019, |
|
"end": 1039, |
|
"text": "(Zhang et al., 2016;", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The first category of approaches extend traditional IR models to incorporate word embeddings that are trained on huge and unlabeled corpora with existing models such as Word2vec (Mikolov et al., 2013) and GloVe (Pennington et al., 2014) in an unsupervised manner. These approaches (e.g. (Zheng and Callan, 2015; Nalisnick et al., 2016) ) leverage semantic information captured by word embeddings in order to enhance traditional IR models. We note that such models trained without references to the retrieval task model term proximity and do not contain discriminative information adapted for IR . The second category (e.g. (Huang et al., 2013; Guo et al., 2016) ) tries to incorporate word embedding learning within neural models for IR, which reflects a more significant shift toward an endto-end framework. These approaches treat word embeddings as layers in neural IR models, to be learned along with all model parameters in a supervised manner. Most studies in the second category rely on click-through data for relevance judgment between queries and documents. Text representation learned with relevance information captures relevance rather than term proximity, which clearly accounts better for IR requirements . However, supervised signals such as click-through data are often limited outside of large industrial research labs, probably due to user privacy concerns. It is thus not surprising to see that many authors following this methodology have industrial background (e.g. (Huang et al., 2013; Shen et al., 2014b; Nalisnick et al., 2016; ). In addition, point out that previous studies using click-through data make implicit but strong assumptions about clicked query-document pairs which are not necessarily met in practice.", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 200, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 236, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 287, |
|
"end": 311, |
|
"text": "(Zheng and Callan, 2015;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 335, |
|
"text": "Nalisnick et al., 2016)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 623, |
|
"end": 643, |
|
"text": "(Huang et al., 2013;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 644, |
|
"end": 661, |
|
"text": "Guo et al., 2016)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1486, |
|
"end": 1506, |
|
"text": "(Huang et al., 2013;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1507, |
|
"end": 1526, |
|
"text": "Shen et al., 2014b;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1527, |
|
"end": 1550, |
|
"text": "Nalisnick et al., 2016;", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Neural networks are hungry for data, a fact which also holds for neural IR tasks. One can find from above discussions that the second category of approaches suffer from the data spareness problem, although there have been recent attempts (Gupta et al., 2017; Dehghani et al., 2017) trying to pseudo label query-document pairs automatically with unsupervised retrieval models such as BM25. Using pseudo labels as relevance signals relieves data spareness in terms of quantity but not quality. The idea of using unsupervised learning to complement supervision has been practiced successfully in computer vision (Yang et al., 2013) and natural language processing (Rasmus et al., 2015) tasks. In such a background, we hypothesize that semantics learned from unlabeled data can complement task-specific representation learned from pseudo-labeled data of limited quality, and a combination of the two is favorable in IR. To the best of our knowledge, such a combination has never been investigated in neural IR models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 258, |
|
"text": "(Gupta et al., 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 281, |
|
"text": "Dehghani et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 609, |
|
"end": 628, |
|
"text": "(Yang et al., 2013)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 661, |
|
"end": 682, |
|
"text": "(Rasmus et al., 2015)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a learning framework which can benefit from both labeled and more abundant unlabeled data for representation learning in IR. Through joint learning in a single neural network, the learned representation can account for task-specific characteristics via supervised loss optimization on query-document matching, as well as preserving general semantics via unsupervised loss optimization on text reconstruction. We demonstrate by experiments that the joint learning model leads to significantly better performance over state-of-the-art IR models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Representation learning approaches based on neural networks have gained in prominence in recent years due to their extreme efficiency. They motivate the emerging research field of Neural IR. Neural approaches have attracted increasing interests of the IR community in very recent years. Apart from learning to rank approaches that train their models over a set of hand-crafted features (Liu, 2009) , neural IR models typically accept the raw text of queries and documents as input. The dense representations of words or texts can then be learned with or without reference to retrieval tasks, respectively corresponding to the two categories of methods summarized in section 1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 386, |
|
"end": 397, |
|
"text": "(Liu, 2009)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Unsupervised approaches learn general text representation without query and document interaction information. Embeddings pre-trained on unlabeled text with tools such as Word2vec (Mikolov et al., 2013) and Glove (Pennington et al., 2014) have been used to extend traditional IR models. Ganguly et al. (2015) develop a generalized language model with query-likelihood language modeling for integrating word embeddings as additional smoothing. Zheng and Callan (2015) represent term and query as vectors in the same latent space based on word embeddings so as to learn a model to reweight terms. Nalisnick et al. (2016) retain both input and output embeddings of Word2vec and map query words into the input space and document words into the output space. Zamani and Croft (2016) propose to use word embeddings to incorporate and weight terms not present in the query, acting as smoothing and query expansion. There are also studies developing their own embedding learning algorithms instead of using standard tools for embedding learning. For instance, Salakhutdinov and Hinton (2009) propose a deep auto-encoder model to generate a condensed binary vector representation of documents. Clinchant and Perronnin (2013) use latent semantic indexing to induce word embeddings for IR. Vuli\u0107 and Moens (2015) propose to learn from document-aligned comparable corpora the embeddings that can be used for both monolingual IR and cross-lingual IR.", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 201, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 212, |
|
"end": 237, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 307, |
|
"text": "Ganguly et al. (2015)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 442, |
|
"end": 465, |
|
"text": "Zheng and Callan (2015)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 617, |
|
"text": "Nalisnick et al. (2016)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 753, |
|
"end": 776, |
|
"text": "Zamani and Croft (2016)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1051, |
|
"end": 1082, |
|
"text": "Salakhutdinov and Hinton (2009)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1278, |
|
"end": 1300, |
|
"text": "Vuli\u0107 and Moens (2015)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Supervised approaches use query-document relevance information to learn the representation that is optimized end-to-end for the task at hand. With click-through data, Huang et al. (2013) develop DSSM, a feed forward neural network with a word hashing phrase as the first layer to predict the click probability given a query string and a document title. DSSM is extended in (Shen et al., 2014a; Shen et al., 2014b) by incorporating convolutional neural network and max-pooling layers to extract the most salient local features. Since the DSSM related methods make implicit but strong assumptions about clicked data, try to relax the assumptions in their model. Guo et al. (2016) develop the DRMM model that takes the histogram-based features representing interactions between queries and documents as input into neural networks. DRMM is one of the first neural IR models to show improvement over traditional IR models. aim to simultaneously learn local and distributional representation to capture both lexical matching and semantic matching in IR. Following the discussion in section 1, we note that click-through data are not always available in massive amount outside of industrial labs. More recent works propose to use unsupervised IR models to pseudo label query-document pairs that provide weak supervision for representation learning. Dehghani et al. (2017) use BM25 to obtain relevant documents for a large set of AOL queries (Pass et al., 2006) which are then used as weakly supervised signals for joint embedding and ranking model training. employ similar supervision signals as (Dehghani et al., 2017) to train an embedding network similar to Word2vec and use the obtained embeddings for query expansion and query classification. Gupta et al. (2017) develop a cross-lingual IR model based on weak supervision. Luo et al. (2017) propose to train deep ranking models with weak relevance labels generated by click model based on click behavior of real users.", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 186, |
|
"text": "Huang et al. (2013)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 393, |
|
"text": "(Shen et al., 2014a;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 413, |
|
"text": "Shen et al., 2014b)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 660, |
|
"end": 677, |
|
"text": "Guo et al. (2016)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1342, |
|
"end": 1364, |
|
"text": "Dehghani et al. (2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1434, |
|
"end": 1453, |
|
"text": "(Pass et al., 2006)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1589, |
|
"end": 1612, |
|
"text": "(Dehghani et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1741, |
|
"end": 1760, |
|
"text": "Gupta et al. (2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1821, |
|
"end": 1838, |
|
"text": "Luo et al. (2017)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We can conclude from above discussions that supervised approaches account better for task-specific features and are superior in IR. They rely on relevance information between query-document pairs of which the quality is relatively low in practice. In this paper, we follow successful practice in CV and NLP tasks and hypothesize that general and rich semantics learned from unlabeled data can complement task-specific representation learned from labeled data of limited quality. We will propose in section 3 a learning framework which can simultaneously learn from labeled and more abundant unlabeled data in the context of IR. By the way, we note that the joint learning framework resembles those studies (e.g. (Liu et al., 2015) ) which couple IR with another supervised learning task. Our framework differs from those studies in that we do not require additional data that are labeled for another supervised learning task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 712, |
|
"end": 730, |
|
"text": "(Liu et al., 2015)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section, we will develop a joint framework to learn low-dimensional representation of queries and documents from both labeled and unlabeled data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint learning framework for IR", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The joint learning framework is illustrated in figure 1. It consists of three crucial components:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning framework", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 An encoding network. It embeds the raw input into low-dimensional representations that are designed to capture target-specific characteristics of IR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning framework", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 A decoding network. It tries to reconstruct the input so as to benefit from unlabeled data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning framework", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 A pairwise ranking model. It makes use of supervision signals from labeled query-document pairs to perform document ranking.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning framework", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "On top of the network structure, we perform joint optimization of both supervised loss and unsupervised loss. The unsupervised learning process uses all the text collection (e.g. queries and documents) for learning rich and general semantics. The supervised learning process learns, from labeled querydocument pairs, discriminative representations adapted for IR. The joint training fashion makes two learning processes complement each other via co-tuning the shared hidden layers in the encoding networks to help the representation generalize better in the IR task. Figure 1 : The joint learning framework with labeled and unlabeled data. It consists of an encoding network, a decoding network and a pairwise ranking model. We impose an unsupervised loss and a supervised loss respectively on the reconstruction output and the pairwise ranking output, which is learned in a joint fashion in this paper. The low-dimensional representation is the one our model aims to learn.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 567, |
|
"end": 575, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning framework", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The unsupervised part learns the low-dimensional representation of text via an autoencoder style, which uses all the available text data. Following previous studies on text autoencoder (Chen and Zaki, 2017), we opt for the simple feed-forward neural network architecture for both the encoding and decoding parts in figure 1. For each layer of the encoding/decoding networks, we use Rectified Linear Unit (ReLU) as the activation function, a function recommended by many works in deep learning (LeCun et al., 2015) . In the feed-forward step, each layer l(l \u2265 1) is a fully-connected layer and its activation potential z l is given by:", |
|
"cite_spans": [ |
|
{ |
|
"start": 493, |
|
"end": 513, |
|
"text": "(LeCun et al., 2015)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "z l = max(0, W l z l\u22121 + b l )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where W l is the weight matrix at layer l and b l is the corresponding bias.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised learning", |
|
"sec_num": "3.2" |
|
}, |
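The following is an illustrative sketch of our own (not from the original paper) showing how the encoding and decoding networks described above could be implemented as stacks of fully connected ReLU layers in PyTorch; all layer sizes are hypothetical.

```python
import torch
import torch.nn as nn

class FeedForwardCoder(nn.Module):
    """Stack of fully connected ReLU layers, z_l = max(0, W_l z_{l-1} + b_l).
    The same module can serve as either the encoding or the decoding network."""

    def __init__(self, layer_sizes):
        super().__init__()
        layers = []
        for d_in, d_out in zip(layer_sizes[:-1], layer_sizes[1:]):
            layers += [nn.Linear(d_in, d_out), nn.ReLU()]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)

# Hypothetical sizes: |V| = 10000 input terms, one hidden layer of 256 units,
# and a 128-dimensional low-dimensional representation.
encoder = FeedForwardCoder([10000, 256, 128])
decoder = FeedForwardCoder([128, 256, 10000])
```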
|
{ |
|
"text": "The input layer (corresponding to l = 0) maps the input text into fixed-length vector. There have been two methodologies we can employ to represent the input text: one is the one-hot representation (Gupta et al., 2017) and its variants (Zhai and Zhang, 2016) ; the other one is the dense and semantically rich representations (He et al., 2017) . Empirical results do not indicate that one is always better than the other and we will make use of the former one in this paper. Given the set of text T , we follow previous studies such as (Zhai and Zhang, 2016; Chen and Zaki, 2017) and represent each input text t in T as log-normalized word count vector x \u2208 R |V | where |V | is the size of the vocabulary V . Each dimension of the input vector x is represented by:", |
|
"cite_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 218, |
|
"text": "(Gupta et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 236, |
|
"end": 258, |
|
"text": "(Zhai and Zhang, 2016)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 343, |
|
"text": "(He et al., 2017)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 536, |
|
"end": 558, |
|
"text": "(Zhai and Zhang, 2016;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 559, |
|
"end": 579, |
|
"text": "Chen and Zaki, 2017)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "x i = log[1 + tf (i)] max i\u2208V log[1 + tf (i)] , for i \u2208 V", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where tf (i) is the term frequency of the i-th word in the vocabulary. Since the unsupervised learning part of the framework is modeled as an autoencoder, we want the unsupervised output x to resemble the input x, leading to the binary cross-entropy loss function l u on t that can be defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "l u (t) = \u2212 i\u2208V [x i log(x i ) + (1 \u2212 x i ) log(1 \u2212 x i )]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised learning", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised learning", |
|
"sec_num": "3.2" |
|
}, |
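As an illustration only, the log-normalized term-frequency input and the reconstruction loss of equation 1 could be computed as below; the vocabulary handling and the sigmoid applied to the decoder output are our assumptions rather than details stated in the paper.

```python
import math
from collections import Counter
import torch
import torch.nn.functional as F

def log_tf_vector(tokens, vocab):
    """x_i = log(1 + tf(i)) / max_i log(1 + tf(i)) over the vocabulary (assumed tokenization)."""
    counts = Counter(t for t in tokens if t in vocab)
    x = torch.zeros(len(vocab))
    for term, tf in counts.items():
        x[vocab[term]] = math.log(1.0 + tf)
    if x.max() > 0:
        x = x / x.max()
    return x

def reconstruction_loss(x, decoder_output):
    """Binary cross-entropy between the input x and the reconstruction
    x_hat = sigmoid(decoder_output), summed over the vocabulary (equation 1)."""
    x_hat = torch.sigmoid(decoder_output)
    return F.binary_cross_entropy(x_hat, x, reduction='sum')
```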
|
{ |
|
"text": "The document ranking problem can not be modeled with the standard classification or regression framework. Following the methodology in learning to rank (Liu, 2009) , we model document ranking in the pairwise style where the relevance information is in the form of preferences between pairs of documents with respect to individual queries. In addition, we follow previous studies (Gupta et al., 2017) and make use of well-performing unsupervised retrieval models (e.g. BM25) to pseudo-label query and document pairs so as to obtain the relevance information. More details will be given in section 4.1. From figure 1 one can note that the hidden layers in the encoding networks are shared by unsupervised and supervised learning, and one can refer to the unsupervised learning part for details of the layers in the encoding networks. The supervised model, on top of the top-level representation layer (i.e. lowdimensional representation), tries to learn a model that, given the query q, assigns a larger score to document d 1 than document d 2 if the ground truth is that d 1 matches to q better. The supervised model is implemented as a pairwise ranking model in figure 1, which is again a feed forward neural networks. Inspired by such studies as (Yih et al., 2011), we can derive the probability P (d 1 q d 2 ) that d 1 is ranked higher than d 2 with respect to the query q via a logistic function:", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 163, |
|
"text": "(Liu, 2009)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 399, |
|
"text": "(Gupta et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "P (d 1 q d 2 ) = 1 1 + e \u2212\u03c3[score(q,d 1 )\u2212score(q,d 2 )]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where the score function is computed with the pairwise ranking model, and the parameter \u03c3 is used to determine the shape of the sigmoid. The supervised training objective l s on a triplet of query-document pair (q, d 1 , d 2 ) can then be defined as the cross entropy loss, which is:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 211, |
|
"end": 222, |
|
"text": "(q, d 1 , d", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "l s (q, d 1 , d 2 ) = \u2212 P (d 1 q d 2 ) log P (d 1 q d 2 ) \u2212 [1 \u2212 P (d 1 q d 2 )] log[1 \u2212 P (d 1 q d 2 )]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "P (d 1 q d 2 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "is the actual probability that d 1 is ranked higher than d 2 according to annotations (i.e. pseudo-labels of query-document pairs). The actual probability in this paper is estimated in a similar way as in (Dehghani et al., 2017) , which is:", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 228, |
|
"text": "(Dehghani et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "P (d 1 q d 2 ) = 1 1 + e \u2212\u03c3[s(q,d 1 )\u2212s(q,d 2 )]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where s denotes the relevance scores obtained from training instances. In the training process, the positive sample d 1 for the query q can be chosen as the most relevant documents according to annotated relevance scores. The negative sample d 2 is selected randomly from the document collection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised learning", |
|
"sec_num": "3.3" |
|
}, |
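A minimal sketch of the pairwise objective in equation 2, under our assumptions that score(q, d) is the output of the ranking network and s(q, d) is the BM25-based pseudo-relevance score; sigma is the shape hyper-parameter, and inputs are expected to be torch tensors.

```python
import torch

def preference_probability(score_d1, score_d2, sigma=1.0):
    """P(d1 >_q d2) = 1 / (1 + exp(-sigma * (score(q, d1) - score(q, d2))))."""
    return torch.sigmoid(sigma * (score_d1 - score_d2))

def pairwise_loss(model_s1, model_s2, pseudo_s1, pseudo_s2, sigma=1.0):
    """Cross entropy between the model's preference probability P and the
    'actual' probability derived from pseudo-label scores s (equation 2)."""
    p = preference_probability(model_s1, model_s2, sigma)        # from the ranking model
    p_bar = preference_probability(pseudo_s1, pseudo_s2, sigma)  # from BM25 pseudo-labels
    return -(p_bar * torch.log(p) + (1.0 - p_bar) * torch.log(1.0 - p))
```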
|
{ |
|
"text": "Combining the unsupervised loss l u in equation 1 on all text data, the supervised loss l s in equation 2 on all labeled query-document pairs, and the L2 norm regularization for weight matrices, one finally arrives at the objective function for the joint learning model, which is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint learning with regularization", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "L(T, DS) = \u03b1 |T | t\u2208T l u (t) + \u03b2 |QD| (q,d 1 ,d 2 )\u2208QD l s (q, d 1 , d 2 ) + l\u2208LY W l 2 F (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint learning with regularization", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where T and |T | denote the set of text data and its size, QD and |QD| denote the set of labeled querydocument pairs and its size, LY stands for all the hidden and output layers of the framework in figure 1, and W l is the weight matrix of the layer l in the network. The hyper-parameters \u03b1, \u03b2 control the importance of the unsupervised loss and the supervised loss. The joint loss function L(T, DS) can be optimized in the gradient-based way, and we use the Adam algorithm (Kingma and Ba, 2015) to compute the gradients.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint learning with regularization", |
|
"sec_num": "3.4" |
|
}, |
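Putting the pieces together, a rough and simplified sketch of optimizing equation 3 is given below; mini-batch construction and all hyper-parameter values (alpha, beta, learning rate) are assumptions made for illustration only.

```python
import torch

def joint_loss(unsup_losses, sup_losses, weight_matrices, alpha=1.0, beta=1.0):
    """L = (alpha/|T|) * sum_t l_u(t) + (beta/|QD|) * sum l_s(q, d1, d2)
           + sum_l ||W_l||_F^2   (equation 3)."""
    l_u = torch.stack(unsup_losses).mean()   # averaged over the text batch
    l_s = torch.stack(sup_losses).mean()     # averaged over the triplet batch
    l2 = sum((W ** 2).sum() for W in weight_matrices)
    return alpha * l_u + beta * l_s + l2

# Gradient-based optimization with Adam; the learning rate here is hypothetical.
# optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
# loss.backward(); optimizer.step()
```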
|
{ |
|
"text": "In this section, we conduct IR experiments to demonstrate the effectiveness of our proposed model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The IR experiments are carried out against standard TREC collections consisting of one Robust track and one Web track, which represent different sizes and genres of heterogeneous text collections. These collections have been broadly used in recent studies (Zheng and Callan, 2015; Guo et al., 2016; Dehghani et al., 2017) . The details of these collections and corresponding queries are given in table 1. The Robust dataset is used in the standard form without change. The ClueWeb-09-Cat-B collection (or ClueWeb for short) is filtered to the set of documents with spam scores in the 60-th percentile with Waterloo Fusion spam scores 1 . For all TREC queries, we only make use of the title fields for retrieval. In order to build the labeled query-document pairs for supervised learning, we choose to use the more general methodology in (Gupta et al., 2017) instead of the one in (Dehghani et al., 2017) to relieve from data (i.e. AOL queries) only available from industrial labs. We fetch a set of news titles from the China Daily website 2 and use these titles as training queries to produce annotated query-document pairs. We use these training queries to retrieve the document collection with BM25. We make sure that no training queries appear in the evaluation query set in table 1. For each training query, we take the top 500 retrieved documents as positive samples. The negative samples are picked randomly from the document collection. There are other strategies for choosing negative samples (Wieting et al., 2015) , which is out of the scope of this paper. For unsupervised learning, we make use of training queries and evaluation document sets listed in table 1, as well as the Wikipedia articles 3 as the external resource.", |
|
"cite_spans": [ |
|
{ |
|
"start": 256, |
|
"end": 280, |
|
"text": "(Zheng and Callan, 2015;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 298, |
|
"text": "Guo et al., 2016;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 321, |
|
"text": "Dehghani et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 837, |
|
"end": 857, |
|
"text": "(Gupta et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 880, |
|
"end": 903, |
|
"text": "(Dehghani et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1502, |
|
"end": 1524, |
|
"text": "(Wieting et al., 2015)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data sets", |
|
"sec_num": "4.1" |
|
}, |
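For illustration, pseudo-labeled training triplets could be built roughly as follows; the rank_bm25 package and the simple whitespace tokenization are our assumptions, not choices documented in the paper (only the use of BM25 and the top-500 positives are).

```python
import random
from rank_bm25 import BM25Okapi  # assumed third-party BM25 implementation

def build_triplets(train_queries, documents, top_k=500):
    """Pseudo-label query-document pairs with BM25: the top-k retrieved documents
    act as positive samples, and negatives are drawn at random from the collection."""
    tokenized_docs = [doc.lower().split() for doc in documents]
    bm25 = BM25Okapi(tokenized_docs)
    triplets = []
    for query in train_queries:
        scores = bm25.get_scores(query.lower().split())
        ranked = sorted(range(len(documents)), key=lambda i: -scores[i])
        for pos in ranked[:top_k]:
            neg = random.randrange(len(documents))
            triplets.append((query, pos, neg, scores[pos], scores[neg]))
    return triplets
```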
|
{ |
|
"text": "We set the hyper-parameters of our model by following similar tasks such as (Dehghani et al., 2017) . The size and number of hidden layers are respectively selected from {64, 128, 256, 512, 1024} and {1, 2, 3, 4} . The values of \u03b1, \u03b2 in equation 3 are chosen from {0.001, 0.01, 0.1, 1, 10, 100, 1000}. We select the initial learning rate from {10 \u22123 , 10 \u22124 , 5 * 10 \u22124 , 10 \u22125 , 5 * 10 \u22125 }. The batch size for learning is selected from {64, 128, 256, 512}. These model hyper-parameters are tuned on the validation set (20% of the training queries used for validation).", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 99, |
|
"text": "(Dehghani et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 212, |
|
"text": "{64, 128, 256, 512, 1024} and {1, 2, 3, 4}", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For IR evaluation, we make use of mean average precision (MAP) of top-ranked 1000 documents , precision at rank 20 (P20), and normalized discounted cumulative gain at rank 20 (nDCG20). Statistically significant differences between various models are determined using the two-tailed paired t-test with p < 0.05.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental setup", |
|
"sec_num": "4.2" |
|
}, |
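As a reference for the metrics, a simple nDCG@20 computation is sketched below (we assume graded relevance with exponential gain; the paper itself presumably relies on standard TREC evaluation tooling).

```python
import math

def ndcg_at_k(ranked_relevances, k=20):
    """Normalized discounted cumulative gain at rank k for one query.
    ranked_relevances: relevance grades of the retrieved documents, in ranked order."""
    def dcg(rels):
        return sum((2 ** r - 1) / math.log2(i + 2) for i, r in enumerate(rels[:k]))
    ideal = dcg(sorted(ranked_relevances, reverse=True))
    return dcg(ranked_relevances) / ideal if ideal > 0 else 0.0
```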
|
{ |
|
"text": "We compare the retrieval performance of our joint learning retrieval model with two categories of IR models: classic IR models showing state-of-the-art performance, and the recent neural ranking models for IR. Since our model is representation-focused rather than interaction-focused, we do not plan to compare our model with those based on relevance matching (Guo et al., 2016) in this paper. More importantly, since our model learns from weakly supervised signals by BM25, we are more interested in the comparisons to BM25 and similar models using weakly supervised signals, an experimental strategy also employed in (Dehghani et al., 2017) . Under such considerations, we perform experiments with the following baselines: \u2022 DSSM: It is a representative deep matching model proposed in (Huang et al., 2013) , which is a representation-focused model. The model is framed as a feed forward neural network with a word hashing layer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 360, |
|
"end": 378, |
|
"text": "(Guo et al., 2016)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 619, |
|
"end": 642, |
|
"text": "(Dehghani et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 788, |
|
"end": 808, |
|
"text": "(Huang et al., 2013)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 NRMS: It is a weakly-supervised neural IR model learned with automatically annotated querydocument pairs (Dehghani et al., 2017) . NRMS shows significant improvement over traditional IR models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 130, |
|
"text": "(Dehghani et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Comparisons to classic models. We use here the recommended settings of the baseline models according to their original papers. Table 2 reports the experimental results on TREC datasets for our model and all the baseline models. One can find from the results that classic IR models BM25 and QL perform similarly on the two collections, a conclusion that is coincident with previous findings. Since BM25 is the model we employ to produce pseudo labels for supervised learning, we will not compare neural models with QL in the following discussions. The neural IR model DSSM performs significantly worse than the traditional BM25 model, due to its unsuitability for relevance matching and for handling the diverse matching requirements in long documents (Guo et al., 2016) . NRMS is a neural ranking model learned from automatically labeled data, which resembles our model. NRMS shows all the significant improvements over BM25. Our model proposed in this paper, by jointly learning from the labeled and unlabeled data, achieves the best overall performance. Our model always significantly outperforms BM25 by a large margin. Comparisons to neural models. We further compare our model with the neural IR models DSSM and NRMS. We find that our model performs better than DSSM and NRMS on all collections. Our model significantly outperforms DSSM in all the cases considered above. Our model significantly outperforms NRMS with only one exception that is not significant on Robust04 with nDCG20. By the way, we find that NRMS is also always significantly better than DSSM on all collections. The experimental conclusion is that our model is always significantly better than traditional IR models and mostly outperforms neural IR models considered above. Furthermore, we find that using unlabeled data for training in neural IR models is useful, since it leads to significant improvement over the neural models only using labeled data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 751, |
|
"end": 769, |
|
"text": "(Guo et al., 2016)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 134, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and analysis", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Impact of unsupervised learning. It has been confirmed above that our model shows the best performance overall. However, it is not clear how much unsupervised learning contributes to the retrieval performance. We thus compare representations learned in a different setting without the help of unsupervised loss, which amounts to removing the unsupervised loss l u from equation 3. We perform IR experiments with the new model over data sets in table 1 and list results in table 3. From the results one can find that the performance of the model without unsupervised loss decreases from the joint model with significance in all the cases considered. It indicates that it is beneficial to combine unsupervised learning with supervised learning in neural IR. Empirical results in this part support our claim in this paper that learning from unlabeled data complements knowledge learned from labeled data in neural IR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and analysis", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a neural IR model which jointly learns from labeled and unlabeled data to benefit from both the rich and general semantics in unlabeled data and target-specific features in labeled data. As far as we can tell, it is the first time such a combination is investigated in neural IR. Experiments on TREC collections show that our model, without any human annotation, is significantly better than traditional IR models and recently proposed models based on neural networks. Experiments also show that using unsupervised learning to complement supervised learning with weak supervision is important in IR. A future direction to follow would be to use more expressive architectures such as LSTM to replace feed-forward networks used in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://plg.uwaterloo.ca/\u02dcgvcormac/clueweb09spam 2 http://www.chinadaily.com.cn 3 The wikipedia dump on September 1, 2017 can be obtained from https://dumps.wikimedia.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank the anonymous reviewers for their valuable comments. This work was supported by the Fundamental Research Funds for Central Universities of CCNU (No. CCNU15A05062).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Kate: K-competitive autoencoder for text", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammed", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Zaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "85--94", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Chen and Mohammed J. Zaki. 2017. Kate: K-competitive autoencoder for text. In Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD, pages 85-94.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Aggregating continuous word embeddings for information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Stephane", |
|
"middle": [], |
|
"last": "Clinchant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florent", |
|
"middle": [], |
|
"last": "Perronnin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Workshop on Continuous Vector Space Models and their Compositionality", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "100--109", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephane Clinchant and Florent Perronnin. 2013. Aggregating continuous word embeddings for information retrieval. In Proceedings of the Workshop on Continuous Vector Space Models and their Compositionality, pages 100-109.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Neural ranking models with weak supervision", |
|
"authors": [ |
|
{ |
|
"first": "Mostafa", |
|
"middle": [], |
|
"last": "Dehghani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Zamani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aliaksei", |
|
"middle": [], |
|
"last": "Severyn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaap", |
|
"middle": [], |
|
"last": "Kamps", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"Bruce" |
|
], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--74", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mostafa Dehghani, Hamed Zamani, Aliaksei Severyn, Jaap Kamps, and W. Bruce Croft. 2017. Neural ranking models with weak supervision. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR, pages 65-74.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Word embedding based generalized language model for information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Debasis", |
|
"middle": [], |
|
"last": "Ganguly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dwaipayan", |
|
"middle": [], |
|
"last": "Roy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Mitra", |
|
"suffix": "" |
|
}, |
|
{

"first": "Gareth",

"middle": [

"J",

"F"

],

"last": "Jones",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 38th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "795--798", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Debasis Ganguly, Dwaipayan Roy, Mandar Mitra, and Gareth J.F. Jones. 2015. Word embedding based general- ized language model for information retrieval. In Proceedings of the 38th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR, pages 795-798.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A deep relevance matching model for ad-hoc retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Jiafeng", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yixing", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingyao", |
|
"middle": [], |
|
"last": "Ai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"Bruce" |
|
], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 25th ACM International Conference on Information and Knowledge Management, CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiafeng Guo, Yixing Fan, Qingyao Ai, and W. Bruce Croft. 2016. A deep relevance matching model for ad-hoc re- trieval. In Proceedings of the 25th ACM International Conference on Information and Knowledge Management, CIKM, pages 55-64.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Continuous space models for clir", |
|
"authors": [ |
|
{ |
|
"first": "Parth", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rafael", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Banchs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Information Processing and Management", |
|
"volume": "53", |
|
"issue": "2", |
|
"pages": "359--370", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Parth Gupta, Rafael E. Banchs, and Paolo Rosso. 2017. Continuous space models for clir. Information Processing and Management, 53(2):359 -370.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Unsupervised cross-modal retrieval through adversarial learning", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Shen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 IEEE International Conference on Multimedia and Expo, ICME", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1153--1158", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. He, X. Xu, H. Lu, Y. Yang, F. Shen, and H. T. Shen. 2017. Unsupervised cross-modal retrieval through adversarial learning. In Proceedings of the 2017 IEEE International Conference on Multimedia and Expo, ICME, pages 1153-1158.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Dahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Jaitly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Senior", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Vanhoucke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Sainath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Kingsbury", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "IEEE Signal Processing Magazine", |
|
"volume": "29", |
|
"issue": "6", |
|
"pages": "82--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Hinton, L. Deng, D. Yu, G. E. Dahl, A. r. Mohamed, N. Jaitly, A. Senior, V. Vanhoucke, P. Nguyen, T. N. Sainath, and B. Kingsbury. 2012. Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups. IEEE Signal Processing Magazine, 29(6):82-97.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Learning deep structured semantic models for web search using clickthrough data", |
|
"authors": [ |
|
{ |
|
"first": "Po-Sen", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Acero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Larry", |
|
"middle": [], |
|
"last": "Heck", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 22nd ACM International Conference on Information and Knowledge Management, CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2333--2338", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Po-Sen Huang, Xiaodong He, Jianfeng Gao, Li Deng, Alex Acero, and Larry Heck. 2013. Learning deep struc- tured semantic models for web search using clickthrough data. In Proceedings of the 22nd ACM International Conference on Information and Knowledge Management, CIKM, pages 2333-2338.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{

"first": "Diederik",

"middle": [

"P"

],

"last": "Kingma",

"suffix": ""

},

{

"first": "Jimmy",

"middle": [],

"last": "Ba",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 3rd International Conference on Learning Representations, ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In Proceedings of the 3rd International Conference on Learning Representations, ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Imagenet classification with deep convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 25th International Conference on Neural Information Processing Systems, NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1097--1105", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E. Hinton. 2012. Imagenet classification with deep convolutional neural networks. In Proceedings of the 25th International Conference on Neural Information Processing Sys- tems, NIPS, pages 1097-1105.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Nature", |
|
"volume": "521", |
|
"issue": "", |
|
"pages": "436--444", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yann LeCun, Yoshua Bengio, and Geoffrey Hinton. 2015. Deep learning. Nature, 521:436-444.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Representation learning using multi-task deep neural networks for semantic classification and information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Duh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ye-Yi", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "912--921", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaodong Liu, Jianfeng Gao, Xiaodong He, Li Deng, Kevin Duh, and Ye-Yi Wang. 2015. Representation learning using multi-task deep neural networks for semantic classification and information retrieval. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL, pages 912-921.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Learning to rank for information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Tie-Yan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Foundations and Trends in Information Retrieval", |
|
"volume": "3", |
|
"issue": "3", |
|
"pages": "225--331", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tie-Yan Liu. 2009. Learning to rank for information retrieval. Foundations and Trends in Information Retrieval, 3(3):225-331.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Training deep ranking model with weak relevance labels", |
|
"authors": [ |
|
{ |
|
"first": "Cheng", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaxin", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiqun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaoping", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Australasian Database Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "205--216", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cheng Luo, Yukun Zheng, Jiaxin Mao, Yiqun Liu, Min Zhang, and Shaoping Ma. 2017. Training deep ranking model with weak relevance labels. In Proceedings of the Australasian Database Conference, pages 205-216.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 26th International Conference on Neural Information Processing Systems, NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Distributed representations of words and phrases and their compositionality. In Proceedings of the 26th International Conference on Neural Information Processing Systems, NIPS, pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Neural models for information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Bhaskar", |
|
"middle": [], |
|
"last": "Mitra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Craswell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bhaskar Mitra and Nick Craswell. 2017. Neural models for information retrieval. CoRR, abs/1705.01509.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Learning to match using local and distributed representations of text for web search", |
|
"authors": [ |
|
{ |
|
"first": "Bhaskar", |
|
"middle": [], |
|
"last": "Mitra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Diaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Craswell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 26th International Conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1291--1299", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bhaskar Mitra, Fernando Diaz, and Nick Craswell. 2017. Learning to match using local and distributed represen- tations of text for web search. In Proceedings of the 26th International Conference on World Wide Web, WWW, pages 1291-1299.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Improving document ranking with dual word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Nalisnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bhaskar", |
|
"middle": [], |
|
"last": "Mitra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Craswell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 25th International Conference Companion on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "83--84", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Nalisnick, Bhaskar Mitra, Nick Craswell, and Rich Caruana. 2016. Improving document ranking with dual word embeddings. In Proceedings of the 25th International Conference Companion on World Wide Web, WWW, pages 83-84.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A picture of search", |
|
"authors": [ |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Pass", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdur", |
|
"middle": [], |
|
"last": "Chowdhury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cayley", |
|
"middle": [], |
|
"last": "Torgeson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 1st International Conference on Scalable Information Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Greg Pass, Abdur Chowdhury, and Cayley Torgeson. 2006. A picture of search. In Proceedings of the 1st International Conference on Scalable Information Systems, InfoScale.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representa- tion. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, EMNLP, pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Semi-supervised learning with ladder networks", |
|
"authors": [ |
|
{ |
|
"first": "Antti", |
|
"middle": [], |
|
"last": "Rasmus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harri", |
|
"middle": [], |
|
"last": "Valpola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikko", |
|
"middle": [], |
|
"last": "Honkala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mathias", |
|
"middle": [], |
|
"last": "Berglund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tapani", |
|
"middle": [], |
|
"last": "Raiko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 28th International Conference on Neural Information Processing Systems, NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3546--3554", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antti Rasmus, Harri Valpola, Mikko Honkala, Mathias Berglund, and Tapani Raiko. 2015. Semi-supervised learning with ladder networks. In Proceedings of the 28th International Conference on Neural Information Processing Systems, NIPS, pages 3546-3554.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Semantic hashing", |
|
"authors": [ |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "International Journal of Approximate Reasoning", |
|
"volume": "50", |
|
"issue": "7", |
|
"pages": "969--978", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruslan Salakhutdinov and Geoffrey Hinton. 2009. Semantic hashing. International Journal of Approximate Reasoning, 50(7):969-978.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A latent semantic model with convolutional-pooling structure for information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Yelong", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Mesnil", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 23rd ACM International Conference on Information and Knowledge Management, CIKM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "101--110", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yelong Shen, Xiaodong He, Jianfeng Gao, Li Deng, and Gr\u00e9goire Mesnil. 2014a. A latent semantic model with convolutional-pooling structure for information retrieval. In Proceedings of the 23rd ACM International Conference on Information and Knowledge Management, CIKM, pages 101-110.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Learning semantic representations using convolutional neural networks for web search", |
|
"authors": [ |
|
{ |
|
"first": "Yelong", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Mesnil", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 23rd International Conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "373--374", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yelong Shen, Xiaodong He, Jianfeng Gao, Li Deng, and Gr\u00e9goire Mesnil. 2014b. Learning semantic representa- tions using convolutional neural networks for web search. In Proceedings of the 23rd International Conference on World Wide Web, WWW, pages 373-374.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Monolingual and cross-lingual information retrieval models based on (bilingual) word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Francine", |
|
"middle": [], |
|
"last": "Moens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 38th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "363--372", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ivan Vuli\u0107 and Marie-Francine Moens. 2015. Monolingual and cross-lingual information retrieval models based on (bilingual) word embeddings. In Proceedings of the 38th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR, pages 363-372.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "From paraphrase database to compositional paraphrase model and back", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Wieting", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Livescu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "345--358", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Wieting, Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2015. From paraphrase database to compositional paraphrase model and back. Transactions of the Association for Computational Linguistics, 3:345-358.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Semi-supervised learning of feature hierarchies for object detection in a video", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Shu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 IEEE Conference on Computer Vision and Pattern Recognition, CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1650--1657", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Yang, G. Shu, and M. Shah. 2013. Semi-supervised learning of feature hierarchies for object detection in a video. In Proceedings of the 2013 IEEE Conference on Computer Vision and Pattern Recognition, CVPR, pages 1650-1657.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Learning relevance from click data via neural network based similarity models", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Ye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Massey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 IEEE International Conference on Big Data", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "801--806", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Ye, Z. Qi, and D. Massey. 2015. Learning relevance from click data via neural network based similarity models. In Proceedings of the 2015 IEEE International Conference on Big Data, pages 801-806.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Learning discriminative projections for text similarity measures", |
|
"authors": [ |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Platt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Meek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Fifteenth Conference on Computational Natural Language Learning, CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "247--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wen-tau Yih, Kristina Toutanova, John C. Platt, and Christopher Meek. 2011. Learning discriminative projections for text similarity measures. In Proceedings of the Fifteenth Conference on Computational Natural Language Learning, CoNLL, pages 247-256.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Embedding-based query language models", |
|
"authors": [ |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Zamani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W. Bruce", |
|
"middle": [], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 ACM International Conference on the Theory of Information Retrieval, ICTIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "147--156", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamed Zamani and W. Bruce Croft. 2016. Embedding-based query language models. In Proceedings of the 2016 ACM International Conference on the Theory of Information Retrieval, ICTIR, pages 147-156.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Relevance-based word embedding", |
|
"authors": [ |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Zamani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W. Bruce", |
|
"middle": [], |
|
"last": "Croft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "505--514", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamed Zamani and W. Bruce Croft. 2017. Relevance-based word embedding. In Proceedings of the 40th Interna- tional ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR, pages 505-514.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Semisupervised autoencoder for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Shuangfei", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongfei Mark", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence, AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1394--1400", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuangfei Zhai and Zhongfei Mark Zhang. 2016. Semisupervised autoencoder for sentiment analysis. In Pro- ceedings of the Thirtieth AAAI Conference on Artificial Intelligence, AAAI, pages 1394-1400.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Neural information retrieval: A literature review", |
|
"authors": [ |
|
{ |
|
"first": "Wallace", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Lease", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wallace, and Matthew Lease. 2016. Neural information retrieval: A literature review. CoRR, abs/1611.06792.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Learning to reweight terms with distributed representations", |
|
"authors": [ |
|
{ |
|
"first": "Guoqing", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Callan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 38th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "575--584", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guoqing Zheng and Jamie Callan. 2015. Learning to reweight terms with distributed representations. In Proceed- ings of the 38th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR, pages 575-584.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "IR collection statistics (M = million, B=Billion).", |
|
"content": "<table><tr><td colspan=\"3\">Collections Doc count Word count</td><td>TREC topics</td></tr><tr><td>Robust04</td><td>0.5M</td><td>252M</td><td>301-450, 601-700</td></tr><tr><td>ClueWeb</td><td>34.0M</td><td>26.1B</td><td>1-200</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Retrieval performance of all models on TREC collections. Significant improvement or degradation at the level 0.05 with respect to BM25 is indicated as (+/-). The other significance comparisons are given in the text. Our Model 0.287 + 0.391 + 0.450 + 0.136 + 0.317 + 0.251 +\u2022 Classic models: The probabilistic BM25 model and query likelihood (QL) model based on Dirichlet smoothing are highly efficient IR models.", |
|
"content": "<table><tr><td/><td/><td>Robust04</td><td/><td/><td>ClueWeb</td><td/></tr><tr><td/><td>MAP</td><td>P20</td><td colspan=\"2\">nDCG20 MAP</td><td>P20</td><td>nDCG20</td></tr><tr><td>BM25</td><td>0.248</td><td>0.351</td><td>0.406</td><td>0.091</td><td>0.237</td><td>0.190</td></tr><tr><td>QL</td><td>0.245</td><td>0.352</td><td>0.404</td><td>0.092</td><td>0.239</td><td>0.193</td></tr><tr><td>DSSM</td><td colspan=\"3\">0.088 \u2212 0.163 \u2212 0.184 \u2212</td><td colspan=\"3\">0.037 \u2212 0.126 \u2212 0.104 \u2212</td></tr><tr><td>NRMS</td><td colspan=\"3\">0.275 + 0.378 + 0.441 +</td><td colspan=\"3\">0.127 + 0.302 + 0.236 +</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Retrieval performance of the model without unsupervised loss. Significant degradation at the level 0.05 with respect to our original model is indicated as -. Without unsupervised loss 0.262 \u2212 0.356 \u2212 0.413 \u2212 0.114 \u2212 0.298 \u2212 0.231 \u2212", |
|
"content": "<table><tr><td/><td/><td>Robust04</td><td/><td/><td>ClueWeb</td><td/></tr><tr><td/><td>MAP</td><td>P20</td><td colspan=\"2\">nDCG20 MAP</td><td>P20</td><td>nDCG20</td></tr><tr><td>Original Model</td><td>0.287</td><td>0.391</td><td>0.450</td><td>0.136</td><td>0.317</td><td>0.251</td></tr></table>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |