{
"paper_id": "D14-1002",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T15:53:17.165273Z"
},
"title": "Modeling Interestingness with Deep Neural Networks",
"authors": [
{
"first": "Jianfeng",
"middle": [],
"last": "Gao",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Microsoft Research One Microsoft Way Redmond",
"location": {
"postCode": "98052",
"region": "WA",
"country": "USA"
}
},
"email": "[email protected]"
},
{
"first": "Patrick",
"middle": [],
"last": "Pantel",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Microsoft Research One Microsoft Way Redmond",
"location": {
"postCode": "98052",
"region": "WA",
"country": "USA"
}
},
"email": "[email protected]"
},
{
"first": "Michael",
"middle": [],
"last": "Gamon",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Microsoft Research One Microsoft Way Redmond",
"location": {
"postCode": "98052",
"region": "WA",
"country": "USA"
}
},
"email": "[email protected]"
},
{
"first": "Xiaodong",
"middle": [],
"last": "He",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Microsoft Research One Microsoft Way Redmond",
"location": {
"postCode": "98052",
"region": "WA",
"country": "USA"
}
},
"email": ""
},
{
"first": "Li",
"middle": [],
"last": "Deng",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Microsoft Research One Microsoft Way Redmond",
"location": {
"postCode": "98052",
"region": "WA",
"country": "USA"
}
},
"email": "[email protected]"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This paper presents a deep semantic similarity model (DSSM), a special type of deep neural networks designed for text analysis, for recommending target documents to be of interest to a user based on a source document that she is reading. We observe, identify, and detect naturally occurring signals of interestingness in click transitions on the Web between source and target documents, which we collect from commercial Web browser logs. The DSSM is trained on millions of Web transitions, and maps source-target document pairs to feature vectors in a latent space in such a way that the distance between source documents and their corresponding interesting targets in that space is minimized. The effectiveness of the DSSM is demonstrated using two interestingness tasks: automatic highlighting and contextual entity search. The results on large-scale, real-world datasets show that the semantics of documents are important for modeling interestingness and that the DSSM leads to significant quality improvement on both tasks, outperforming not only the classic document models that do not use semantics but also state-of-the-art topic models.",
"pdf_parse": {
"paper_id": "D14-1002",
"_pdf_hash": "",
"abstract": [
{
"text": "This paper presents a deep semantic similarity model (DSSM), a special type of deep neural networks designed for text analysis, for recommending target documents to be of interest to a user based on a source document that she is reading. We observe, identify, and detect naturally occurring signals of interestingness in click transitions on the Web between source and target documents, which we collect from commercial Web browser logs. The DSSM is trained on millions of Web transitions, and maps source-target document pairs to feature vectors in a latent space in such a way that the distance between source documents and their corresponding interesting targets in that space is minimized. The effectiveness of the DSSM is demonstrated using two interestingness tasks: automatic highlighting and contextual entity search. The results on large-scale, real-world datasets show that the semantics of documents are important for modeling interestingness and that the DSSM leads to significant quality improvement on both tasks, outperforming not only the classic document models that do not use semantics but also state-of-the-art topic models.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Tasks of predicting what interests a user based on the document she is reading are fundamental to many online recommendation systems. A recent survey is due to Ricci et al. (2011) . In this paper, we exploit the use of a deep semantic model for two such interestingness tasks in which document semantics play a crucial role: automatic highlighting and contextual entity search.",
"cite_spans": [
{
"start": 160,
"end": 179,
"text": "Ricci et al. (2011)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Automatic Highlighting. In this task we want a recommendation system to automatically discover the entities (e.g., a person, location, organi-zation etc.) that interest a user when reading a document and to highlight the corresponding text spans, referred to as keywords afterwards. We show in this study that document semantics are among the most important factors that influence what is perceived as interesting to the user. For example, we observe in Web browsing logs that when a user reads an article about a movie, she is more likely to browse to an article about an actor or character than to another movie or the director.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Contextual entity search. After identifying the keywords that represent the entities of interest to the user, we also want the system to recommend new, interesting documents by searching the Web for supplementary information about these entities. The task is challenging because the same keywords often refer to different entities, and interesting supplementary information to the highlighted entity is highly sensitive to the semantic context. For example, \"Paul Simon\" can refer to many people, such as the singer and the senator. Consider an article about the music of Paul Simon and another about his life. Related content about his upcoming concert tour is much more interesting in the first context, while an article about his family is more interesting in the second.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "At the heart of these two tasks is the notion of interestingness. In this paper, we model and make use of this notion of interestingness with a deep semantic similarity model (DSSM). The model, extending from the deep neural networks shown recently to be highly effective for speech recognition (Hinton et al., 2012; Deng et al., 2013) and computer vision (Krizhevsky et al., 2012; Markoff, 2014) , is semantic because it maps documents to feature vectors in a latent semantic space, also known as semantic representations. The model is deep because it employs a neural network with several hidden layers including a special convolutional-pooling structure to identify keywords and extract hidden semantic features at different levels of abstractions, layer by layer. The semantic representation is computed through a deep neural network after its training by backpropagation with respect to an objective tailored to the respective interestingness tasks. We obtain naturally occurring \"interest\" signals by observing Web browser transitions, from a source document to a target document, in Web usage logs of a commercial browser. Our training data is sampled from these transitions.",
"cite_spans": [
{
"start": 295,
"end": 316,
"text": "(Hinton et al., 2012;",
"ref_id": "BIBREF21"
},
{
"start": 317,
"end": 335,
"text": "Deng et al., 2013)",
"ref_id": "BIBREF10"
},
{
"start": 356,
"end": 381,
"text": "(Krizhevsky et al., 2012;",
"ref_id": "BIBREF26"
},
{
"start": 382,
"end": 396,
"text": "Markoff, 2014)",
"ref_id": "BIBREF28"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The use of the DSSM to model interestingness is motivated by the recent success of applying related deep neural networks to computer vision (Krizhevshy et al. 2012; Markoff, 2014) , speech recognition (Hinton et al. 2012 ), text processing (Collobert et al. 2011) , and Web search (Huang et al. 2013) . Among them, (Huang et al. 2013) is most relevant to our work. They also use a deep neural network to map documents to feature vectors in a latent semantic space. However, their model is designed to represent the relevance between queries and documents, which differs from the notion of interestingness between documents studied in this paper. It is often the case that a user is interested in a document because it provides supplementary information about the entities or concepts she encounters when reading another document although the overall contents of the second documents is not highly relevant. For example, a user may be interested in knowing more about the history of University of Washington after reading the news about President Obama's visit to Seattle. To better model interestingness, we extend the model of Huang et al. (2013) in two significant aspects. First, while Huang et al. treat a document as a bag of words for semantic mapping, the DSSM treats a document as a sequence of words and tries to discover prominent keywords. These keywords represent the entities or concepts that might interest users, via the convolutional and max-pooling layers which are related to the deep models used for computer vision (Krizhevsky et al., 2013) and speech recognition (Deng et al., 2013a) but are not used in Huang et al.'s model. The DSSM then forms the high-level semantic representation of the whole document based on these keywords. Second, instead of directly computing the document relevance score using cosine similarity in the learned semantic space, as in Huang et al. (2013) , we feed the features derived from the semantic representations of documents to a ranker which is trained in a supervised manner. As a result, a document that is not highly relevant to another document a user is reading (i.e., the distance between their derived feature 1 We stress here that, although the click signal is available to form a dataset and a gold standard ranker (to be described in vectors is big) may still have a high score of interestingness because the former provides useful information about an entity mentioned in the latter. Such information and entity are encoded, respectively, by (some subsets of) the semantic features in their corresponding documents. In Sections 4 and 5, we empirically demonstrate that the aforementioned two extensions lead to significant quality improvements for the two interestingness tasks presented in this paper.",
"cite_spans": [
{
"start": 140,
"end": 164,
"text": "(Krizhevshy et al. 2012;",
"ref_id": null
},
{
"start": 165,
"end": 179,
"text": "Markoff, 2014)",
"ref_id": "BIBREF28"
},
{
"start": 201,
"end": 220,
"text": "(Hinton et al. 2012",
"ref_id": "BIBREF21"
},
{
"start": 240,
"end": 263,
"text": "(Collobert et al. 2011)",
"ref_id": "BIBREF8"
},
{
"start": 281,
"end": 300,
"text": "(Huang et al. 2013)",
"ref_id": "BIBREF24"
},
{
"start": 315,
"end": 334,
"text": "(Huang et al. 2013)",
"ref_id": "BIBREF24"
},
{
"start": 1128,
"end": 1147,
"text": "Huang et al. (2013)",
"ref_id": "BIBREF24"
},
{
"start": 1535,
"end": 1560,
"text": "(Krizhevsky et al., 2013)",
"ref_id": null
},
{
"start": 1584,
"end": 1604,
"text": "(Deng et al., 2013a)",
"ref_id": "BIBREF11"
},
{
"start": 1881,
"end": 1900,
"text": "Huang et al. (2013)",
"ref_id": "BIBREF24"
},
{
"start": 2172,
"end": 2173,
"text": "1",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Before giving a formal description of the DSSM in Section 3, we formally define the interestingness function, and then introduce our data set of naturally occurring interest signals.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Let be the set of all documents. Following Gamon et al. (2013) , we formally define the interestingness modeling task as learning the mapping function:",
"cite_spans": [
{
"start": 43,
"end": 62,
"text": "Gamon et al. (2013)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The Notion of Interestingness",
"sec_num": "2"
},
{
"text": ": \u2192",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Notion of Interestingness",
"sec_num": "2"
},
{
"text": "where the function , is the quantified degree of interest that the user has in the target document \u2208 after or while reading the source document \u2208 . Our notion of a document is meant in its most general form as a string of raw unstructured text. That is, the interestingness function should not rely on any document structure such as title tags, hyperlinks, etc., or Web interaction data. In our tasks, documents can be formed either from the plain text of a webpage or as a text span in that plain text, as will be discussed in Sections 4 and 5.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Notion of Interestingness",
"sec_num": "2"
},
{
"text": "We can observe many naturally occurring manifestations of interestingness on the Web. For example, on Twitter, users follow shared links embedded in tweets. Arguably the most frequent signal, however, occurs in Web browsing events where users click from one webpage to another via hyperlinks. When a user clicks on a hyperlink, it is reasonable to assume that she is interested in learning more about the anchor, modulo cases of erroneous clicks. Aggregate clicks can therefore serve as a proxy for interestingness. That is, for a given source document, target documents that attract the most clicks are more interesting than documents that attract fewer clicks 1 . Section 4), our task is to model interestingness between unstructured documents, i.e., without access to any document structure or Web interaction data. Thus, in our experiments,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data",
"sec_num": "2.1"
},
{
"text": "We collect a large dataset of user browsing events from a commercial Web browser. Specifically, we sample 18 million occurrences of a user click from one Wikipedia page to another during a one year period. We restrict our browsing events to Wikipedia since its pages tend to contain many anchors (79 on average, where on average 42 have a unique target URL). Thus, they attract enough traffic for us to obtain robust browsing transition data 2 . We group together all transitions originating from the same page and randomly hold out 20% of the transitions for our evaluation data (EVAL), 20% for training the DSSM described in Section 3.2 (TRAIN_1), and the remaining 60% for training our task specific rankers described in Section 3.3 (TRAIN_2). In our experiments, we used different settings for the two interestingness tasks. Thus, we postpone the detailed description of these datasets and other task-specific datasets to Sections 4 and 5.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data",
"sec_num": "2.1"
},
{
"text": "This section presents the architecture of the DSSM, describes the parameter estimation, and the way the DSSM is used in our tasks.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "A Deep Semantic Similarity Model (DSSM)",
"sec_num": "3"
},
{
"text": "The heart of the DSSM is a deep neural network with convolutional structure, as shown in Figure 1 . In what follows, we use lower-case bold letters, such as , to denote column vectors, to denote the element of , and upper-case letters, such as , to denote matrices. Input Layer . It takes two steps to convert a document , which is a sequence of words, into a vector representation for the input layer of the network: (1) convert each word in to a word vector, and (2) build by concatenating these word vectors. To convert a word into a word vector, we first represent by a one-hot vector using a vocabulary that contains high frequent words (",
"cite_spans": [],
"ref_spans": [
{
"start": 89,
"end": 98,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Network Architecture",
"sec_num": "3.1"
},
{
"text": "150K in this study). Then, following Huang et al. 2013, we map to a separate tri-letter vector. Consider the word \"#dog#\", where # is a word boundary symbol. The nonzero elements in its triletter vector are \"#do\", \"dog\", and \"og#\". We then form the word vector of by concatenating its one-hot vector and its tri-letter vector. It is worth noting that the tri-letter vector complements the one-hot vector representation in two aspects. First, different OOV (out of vocabulary) words can be represented by tri-letter vectors with few collisions. Second, spelling variations of the same word can be mapped to the points that are close to each other in the tri-letter space. Although the number of unique English words on the Web is extremely large, the total number of distinct triletters in English is limited (restricted to the most frequent 30K in this study). As a result, incorporating tri-letter vectors substantially improves the representation power of word vectors while keeping their size small.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Network Architecture",
"sec_num": "3.1"
},
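The word-hashing step above is easy to make concrete. The sketch below is a minimal illustration, not the paper's implementation; the three-word vocabulary and the derived tri-letter inventory stand in for the 150K-word and 30K-trigram sets used in the paper.

```python
from collections import Counter

def letter_trigrams(word):
    """Tri-letter (letter-trigram) features of a word, with '#' as the
    word-boundary symbol, e.g. '#dog#' -> {'#do', 'dog', 'og#'}."""
    marked = "#" + word.lower() + "#"
    return Counter(marked[i:i + 3] for i in range(len(marked) - 2))

def word_vector(word, vocab, trigram_index):
    """Concatenate a one-hot vector over `vocab` with a count vector over
    the tri-letter inventory `trigram_index` (both toy-sized here)."""
    one_hot = [1.0 if word == w else 0.0 for w in vocab]
    tri = [0.0] * len(trigram_index)
    for t, c in letter_trigrams(word).items():
        if t in trigram_index:
            tri[trigram_index[t]] = float(c)
    return one_hot + tri

vocab = ["dog", "cat", "walks"]                        # stand-in for the 150K vocabulary
trigrams = sorted({t for w in vocab for t in letter_trigrams(w)})
trigram_index = {t: i for i, t in enumerate(trigrams)}  # stand-in for the 30K tri-letters
print(word_vector("dogs", vocab, trigram_index))        # an OOV word still gets tri-letter features
```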
{
"text": "To form our input layer using word vectors, we first identify a text span with a high degree of relevance, called focus, in using task-specific heuristics (see Sections 4 and 5 respectively). Second, we form by concatenating each word vector in the focus and a vector that is the summation of all other word vectors, as shown in Figure 1 . Since the length of the focus is much smaller than that of its document, is able to capture the contextual information (for the words in the focus)",
"cite_spans": [],
"ref_spans": [
{
"start": 329,
"end": 337,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Network Architecture",
"sec_num": "3.1"
},
{
"text": "Web document into plain text, which is white-space tokenized and lowercased. Numbers are retained and no stemming is performed. 2 We utilize the May 3, 2013 English Wikipedia dump consisting of roughly 4.1 million articles from http://dumps.wikimedia.org. ",
"cite_spans": [
{
"start": 128,
"end": 129,
"text": "2",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Network Architecture",
"sec_num": "3.1"
},
{
"text": "in a word sequence of length as follows. We first generate a contextual vector by concatenating the word vectors of and its surrounding words defined by a window (the window size is set to 3 in this paper). Then, we generate for each word a local feature vector using a tanh activation function and a linear projection matrix , which is the same across all windows in the word sequence, as:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
{
"text": "tanh , where 1 \u2026 (1)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
{
"text": "Max-pooling Layer . The size of the output depends on the number of words in the word sequence. Local feature vectors have to be combined to obtain a global feature vector, with a fixed size independent of the document length, in order to apply subsequent standard affine layers. We design by adopting the max operation over each \"time\" of the sequence of vectors computed by (1), which forces the network to retain only the most useful, partially invariant local features produced by the convolutional layer:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "max ,\u2026, u",
"eq_num": "(2)"
}
],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
{
"text": "where the max operation is performed for each dimension of across 1, \u2026 , respectively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
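A compact numpy sketch of equations (1) and (2), assuming word vectors have already been built as above; the dimensions, the zero padding at the sequence edges, and the randomly initialized projection matrix W_c are illustrative choices, not the paper's configuration.

```python
import numpy as np

rng = np.random.default_rng(0)
L, d_word, d_conv, win = 7, 20, 4, 3           # sequence length, word-vector size, feature size, window

words = rng.normal(size=(L, d_word))           # toy word vectors w_1 .. w_L
W_c = rng.normal(size=(d_conv, win * d_word))  # shared projection matrix of the convolutional layer

def context(i):
    """Contextual vector c_i: concatenation of w_{i-1}, w_i, w_{i+1} (zero-padded at the edges)."""
    padded = np.vstack([np.zeros((1, d_word)), words, np.zeros((1, d_word))])
    return padded[i:i + win].reshape(-1)

U = np.stack([np.tanh(W_c @ context(i)) for i in range(L)])  # eq. (1): local features u_1 .. u_L
v = U.max(axis=0)                                            # eq. (2): max over "time" per dimension
keywords = U.argmax(axis=0)                                  # word index that survives pooling, per dimension
print(v.shape, keywords)
```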
{
"text": "That convolutional and max-pooling layers are able to discover prominent keywords of a document can be demonstrated using the procedure in Figure 2 using a toy example. First, the convolutional layer of (1) generates for each word in a 5word document a 4-dimensional local feature vector, which represents a distribution of four topics. For example, the most prominent topic of within its three word context window is the first topic, denoted by 1 , and the most prominent topic of is 3 . Second, we use max-pooling of (2) to form a global feature vector, which represents the topic distribution of the whole document. We see that 1 and 3 are two prominent topics. Then, for each prominent topic, we trace back to the local feature vector that survives max-pooling:",
"cite_spans": [],
"ref_spans": [
{
"start": 139,
"end": 147,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
{
"text": "1 max ,\u2026, 1 1 3 max ,\u2026, 3 3 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
{
"text": "Finally, we label the corresponding words of these local feature vectors, and , as keywords of the document. Figure 3 presents a sample of document snippets and their keywords detected by the DSSM according to the procedure elaborated in Figure 2 . It is interesting to see that many names are identified as keywords although the DSSM is not designed explicitly for named entity recognition.",
"cite_spans": [],
"ref_spans": [
{
"start": 109,
"end": 117,
"text": "Figure 3",
"ref_id": "FIGREF1"
},
{
"start": 238,
"end": 246,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
{
"text": "Fully-Connected Layers and . The fixed sized global feature vector of (2) is then fed to several standard affine network layers, which are stacked and interleaved with nonlinear activation functions, to extract highly non-linear features at the output layer. In our model, shown in Figure 1 , we have:",
"cite_spans": [],
"ref_spans": [
{
"start": 282,
"end": 291,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "tanh (3) tanh",
"eq_num": "(4)"
}
],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
{
"text": "where and are learned linear projection matrices.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Convolutional Layer . A convolutional layer extracts local features around each word",
"sec_num": null
},
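Continuing the sketch, the pooled vector v is passed through the two affine layers of equations (3) and (4). The toy layer sizes below are placeholders; the paper's output layer y has 300 dimensions.

```python
import numpy as np

rng = np.random.default_rng(1)
d_conv, d_h, d_out = 4, 6, 5        # toy sizes; the paper's output layer y has 300 dimensions

v = rng.normal(size=d_conv)         # global feature vector from max-pooling, eq. (2)
W_1 = rng.normal(size=(d_h, d_conv))
W_2 = rng.normal(size=(d_out, d_h))

h = np.tanh(W_1 @ v)                # eq. (3)
y = np.tanh(W_2 @ h)                # eq. (4): the document's semantic feature vector
print(y)
```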
{
"text": "To optimize the parameters of the DSSM of Figure ",
"cite_spans": [],
"ref_spans": [
{
"start": 42,
"end": 48,
"text": "Figure",
"ref_id": null
}
],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": "1, i.e.,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": ", , , we use a pair-wise rank loss as objective (Yih et al. 2011) . Consider a source document and two candidate target documents and , where is more interesting than to a user when reading . We construct two pairs of documents , and , , where the former is preferred and should have a higher",
"cite_spans": [
{
"start": 48,
"end": 65,
"text": "(Yih et al. 2011)",
"ref_id": "BIBREF41"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": "Figure 2: Toy example of (upper) a 5-word document and its local feature vectors extracted using a convolutional layer, and (bottom) the global feature vector of the document generated after max-pooling.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": "interestingness score. Let \u2206 be the difference of their interestingness scores:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": "\u2206 , ,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": ", where is the interestingness score, computed as the cosine similarity:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": ", \u2261 sim , \u2016 \u2016\u2016 \u2016",
"eq_num": "(5)"
}
],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": "where and are the feature vectors of and , respectively, which are generated using the DSSM, parameterized by . Intuitively, we want to learn to maximize \u2206. That is, the DSSM is learned to represent documents as points in a hidden interestingness space, where the similarity between a document and its interesting documents is maximized. We use the following logistic loss over \u2206 , which can be shown to upper bound the pairwise accuracy:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": "\u2206; log 1 exp \u2206 (6)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
{
"text": "The loss function in (6) has a shape similar to the hinge loss used in SVMs. Because of the use of the cosine similarity function, we add a scaling factor that magnifies \u2206 from [-2, 2] to a larger range. Empirically, the value of makes no difference as long as it is large enough. In the experiments, we set 10. Because the loss function is differentiable, optimizing the model parameters can be done using gradient-based methods. Due to space limitations, we omit the derivation of the gradient of the loss function, for which readers are referred to related derivations (e.g., Collobert et al. 2011; Huang et al. 2013; Shen et al. 2014) .",
"cite_spans": [
{
"start": 579,
"end": 601,
"text": "Collobert et al. 2011;",
"ref_id": "BIBREF8"
},
{
"start": 602,
"end": 620,
"text": "Huang et al. 2013;",
"ref_id": "BIBREF24"
},
{
"start": 621,
"end": 638,
"text": "Shen et al. 2014)",
"ref_id": "BIBREF35"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
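Equations (5) and (6) translate directly into code. The following is a schematic forward computation only (with gamma = 10 as in the text), not the GPU-accelerated trainer described below.

```python
import numpy as np

def cosine(a, b):
    """Cosine similarity between two semantic feature vectors, eq. (5)."""
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

def pairwise_loss(y_s, y_t_pos, y_t_neg, gamma=10.0):
    """Logistic loss over the score difference Delta, eq. (6)."""
    delta = cosine(y_s, y_t_pos) - cosine(y_s, y_t_neg)
    return np.log1p(np.exp(-gamma * delta))

rng = np.random.default_rng(2)
y_s, y_pos, y_neg = (rng.normal(size=300) for _ in range(3))  # stand-ins for DSSM output vectors
print(pairwise_loss(y_s, y_pos, y_neg))
```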
{
"text": "In our experiments we trained DSSMs using mini-batch Stochastic Gradient Descent. Each mini-batch consists of 256 source-target document pairs. For each source document , we randomly select from that batch four target documents which are not paired with as negative training samples 3 . The DSSM trainer is implemented using a GPU-accelerated linear algebra library, which is developed on CUDA 5.5. Given the training set (TRAIN_1 in Section 2), it takes approximately 30 hours to train a DSSM as shown in Figure 1 , on a Xeon E5-2670 2.60GHz machine with one Tesla K20 GPU card.",
"cite_spans": [],
"ref_spans": [
{
"start": 506,
"end": 514,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
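The in-batch negative sampling described above can be sketched as follows; the batch size of 256 and the four negatives per source follow the text, while the string placeholders stand in for actual documents.

```python
import random

def in_batch_negatives(batch_pairs, num_neg=4, seed=0):
    """For each (source, target) pair, sample `num_neg` targets of *other*
    pairs in the same mini-batch as negative examples."""
    rnd = random.Random(seed)
    targets = [t for _, t in batch_pairs]
    examples = []
    for i, (s, t_pos) in enumerate(batch_pairs):
        candidates = targets[:i] + targets[i + 1:]   # targets not paired with this source
        negatives = rnd.sample(candidates, num_neg)
        examples.append((s, t_pos, negatives))
    return examples

batch = [(f"src_{i}", f"tgt_{i}") for i in range(256)]  # one mini-batch of 256 pairs
print(in_batch_negatives(batch)[0])
```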
{
"text": "In principle, the loss function of (6) can be further regularized (e.g. by adding a term of 2 norm) to deal with overfitting. However, we did not find a clear empirical advantage over the simpler early stop approach in a pilot study, hence we adopted the latter in the experiments in this paper. Our approach adjusts the learning rate during the course of model training. Starting with 1.0, after each epoch (a pass over the entire training data), the learning rate is adjusted as 0.5 if the loss on validation data (held-out from TRAIN_1) is not reduced. The training stops if either is smaller than a preset threshold (0.0001) or the loss on training data can no longer be reduced significantly. In our experiments, the DSSM training typically converges within 20 epochs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training the DSSM",
"sec_num": "3.2"
},
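The learning-rate schedule and stopping rule described in this paragraph amount to the loop below; train_epoch and validation_loss are hypothetical callbacks standing in for the actual trainer and the held-out evaluation.

```python
def fit(train_epoch, validation_loss, eta=1.0, min_eta=1e-4, max_epochs=20, tol=1e-5):
    """Halve the learning rate whenever validation loss stops improving;
    stop when eta falls below `min_eta` or training loss stops decreasing."""
    best_val, prev_train = float("inf"), float("inf")
    for epoch in range(max_epochs):
        train_loss = train_epoch(eta)        # one pass over the training data
        val_loss = validation_loss()
        if val_loss >= best_val:
            eta *= 0.5                       # eta <- 0.5 * eta
        best_val = min(best_val, val_loss)
        if eta < min_eta or prev_train - train_loss < tol:
            break
        prev_train = train_loss
    return eta

# Tiny demo with fake losses, just to show the loop runs.
losses = iter([1.0, 0.8, 0.79, 0.79, 0.789])
print(fit(lambda eta: next(losses, 0.788), lambda: 0.5))
```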
{
"text": "We experiment with two ways of using the DSSM for the two interestingness tasks. First, we use the DSSM as a feature generator. The output layer of the DSSM can be seen as a set of semantic features, which can be incorporated in a boosted tree is to approximate the partition function using Noise Contrastive Estimation (Gutmann and Hyvarinen 2010) . We leave it to future work. \u2026 the comedy festival formerly known as the us comedy arts festival is a comedy festival held each year in las vegas nevada from its 1985 inception to 2008 . it was held annually at the wheeler opera house and other venues in aspen colorado . the primary sponsor of the festival was hbo with co-sponsorship by caesars palace . the primary venue tbs geico insurance twix candy bars and smirnoff vodka hbo exited the festival business in 2007 and tbs became the primary sponsor the festival includes standup comedy performances appearances by the casts of television shows\u2026 \u2026 bad samaritans is an american comedy series produced by walt becker kelly hayes and ross putman . it premiered on netflix on march 31 2013 cast and characters . the show focuses on a community service parole group and their parole officer brian kubach as jake gibson an aspiring professional starcraft player who gets sentenced to 2000 hours of community service for starting a forest fire during his breakup with drew prior to community service he had no real ambition in life other than to be a professional gamer and become wealthy overnight like mark zuckerberg as in life his goal during \u2026 based ranker (Friedman 1999 ) trained discriminatively on the task-specific data. Given a sourcetarget document pair , , the DSSM generates 600 features (300 from the output layers and for each and , respectively).",
"cite_spans": [
{
"start": 320,
"end": 348,
"text": "(Gutmann and Hyvarinen 2010)",
"ref_id": "BIBREF20"
},
{
"start": 1561,
"end": 1575,
"text": "(Friedman 1999",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Using the DSSM",
"sec_num": "3.3"
},
{
"text": "Second, we use the DSSM as a direct implementation of the interestingness function . Recall from Section 3.2 that in model training, we measure the interestingness score for a document pair using the cosine similarity between their corresponding feature vectors ( and ). Similarly at runtime, we define sim , as (5).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Using the DSSM",
"sec_num": "3.3"
},
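The two usage modes can be summarized as follows; dssm is a hypothetical callable returning a document's 300-dimensional output-layer vector, and the random stub merely makes the sketch runnable.

```python
import numpy as np

def ranker_features(dssm, source, target):
    """Mode 1: concatenate the two 300-d semantic vectors into 600 ranker features."""
    return np.concatenate([dssm(source), dssm(target)])

def interestingness(dssm, source, target):
    """Mode 2: use the DSSM directly, scoring the pair by cosine similarity (eq. 5)."""
    y_s, y_t = dssm(source), dssm(target)
    return float(y_s @ y_t / (np.linalg.norm(y_s) * np.linalg.norm(y_t)))

fake_dssm = lambda doc: np.random.default_rng(abs(hash(doc)) % 2**32).normal(size=300)
print(ranker_features(fake_dssm, "source text", "target text").shape)  # (600,)
print(interestingness(fake_dssm, "source text", "target text"))
```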
{
"text": "Recall from Section 1 that in this task, a system must select most interesting keywords in a document that a user is reading. To evaluate our models using the click transition data described in Section 2, we simulate the task as follows. We use the set of anchors in a source document to simulate the set of candidate keywords that may be of interest to the user while reading , and treat the text of a document that is linked by an anchor in as a target document . As shown in Figure 1 , to apply DSSM to a specific task, we need to define the focus in source and target documents. In this task, the focus in s is defined as the anchor text, and the focus in t is defined as the first 10 tokens in t.",
"cite_spans": [],
"ref_spans": [
{
"start": 478,
"end": 486,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Experiments on Highlighting",
"sec_num": "4"
},
{
"text": "We evaluate the performance of a highlighting system against a gold standard interestingness function which scores the interestingness of an anchor as the number of user clicks on from the anchor in in our data. We consider the ideal selection to then consist of the most interesting anchors according to . A natural metric for this task is Normalized Discounted Cumulative Gain (NDCG) (Jarvelin and Kekalainen 2000) .",
"cite_spans": [
{
"start": 386,
"end": 416,
"text": "(Jarvelin and Kekalainen 2000)",
"ref_id": "BIBREF25"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments on Highlighting",
"sec_num": "4"
},
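For reference, a standard NDCG@k computation with linear gains (the gold-standard click counts serve as gains); this is a generic formulation, not the paper's exact evaluation code.

```python
import math

def dcg(gains, k):
    """Discounted cumulative gain over the first k positions."""
    return sum(g / math.log2(i + 2) for i, g in enumerate(gains[:k]))

def ndcg(predicted_gains, k):
    """`predicted_gains`: gold gains (e.g., click counts) ordered by the system's ranking."""
    ideal_dcg = dcg(sorted(predicted_gains, reverse=True), k)
    return dcg(predicted_gains, k) / ideal_dcg if ideal_dcg > 0 else 0.0

print(ndcg([3, 0, 2, 5, 0], k=5))  # system ordering vs. the ideal ordering [5, 3, 2, 0, 0]
```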
{
"text": "We evaluate our models on the EVAL dataset described in Section 2. We utilize the transition distributions in EVAL to create three other test sets, following the stratified sampling methodology commonly employed in the IR community, for the frequently, less frequently, and rarely viewed source pages, referred to as HEAD, TORSO, and TAIL, respectively. We obtain these sets by first sorting the unique source documents according to their frequency of occurrence in EVAL. We then partition the set so that HEAD corresponds to all transitions from the source pages at the top of the list that account for 20% of the transitions in EVAL; TAIL corresponds to the transitions at the bottom also accounting for 20% of the transitions in EVAL; and TORSO corresponds to the remaining transitions. Table 1 summarizes the results of various models over the three test sets using NDCG at truncation levels 1, 5, and 10.",
"cite_spans": [],
"ref_spans": [
{
"start": 790,
"end": 797,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Experiments on Highlighting",
"sec_num": "4"
},
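A sketch of the HEAD/TORSO/TAIL partition described above: source pages are sorted by transition frequency, the most frequent pages covering 20% of transitions form HEAD, the least frequent pages covering 20% form TAIL, and the rest form TORSO. The toy counts are illustrative.

```python
from collections import Counter

def cover(pages_with_counts, budget):
    """Take pages in the given order until their transitions cover `budget`."""
    chosen, cum = set(), 0
    for page, count in pages_with_counts:
        chosen.add(page)
        cum += count
        if cum >= budget:
            break
    return chosen

def head_torso_tail(transitions, share=0.2):
    counts = Counter(transitions).most_common()    # source pages sorted by frequency
    budget = share * sum(c for _, c in counts)
    head = cover(counts, budget)                   # most frequently viewed sources
    tail = cover(reversed(counts), budget) - head  # least frequently viewed sources
    torso = {p for p, _ in counts} - head - tail
    return head, torso, tail

print(head_torso_tail(["a"] * 50 + ["b"] * 20 + ["c"] * 15 + ["d"] * 10 + ["e"] * 5))
```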
{
"text": "Rows 1 to 3 are simple heuristic baselines. RAND selects random anchors, 1stK selects the first anchors and LastK the last anchors.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Main Results",
"sec_num": "4.1"
},
{
"text": "The other models in Table 1 are boosted tree based rankers trained on TRAIN_2 described in Section 2. They vary only in their features. The ranker in Row 4 uses Non-Semantic Features (NSF) only. These features are derived from the # Models HEAD TORSO TAIL @1 @5 @10 @1 @5 @10 @1 @5 @10 source document s and from user session information in the browser log. The document features include: position of the anchor in the document, frequency of the anchor, and anchor density in the paragraph. The rankers in Rows 5 to 12 use the NSF and the semantic features computed from source and target documents of a browsing transition. We compare semantic features derived from three different sources. The first feature source comes from our DSSMs (DSSM and DSSM_BOW) using the output layers as feature generators as described in Section 3.3. DSSM is the model described in Section 3 and DSSM_BOW is the model proposed by Huang et al. (2013) where documents are view as bag of words (BOW) and the convolutional and max-pooling layers are not used. The two other sources of semantic features are used as a point of comparison to the DSSM. One is a generative semantic model (Joint Transition Topic model, or JTT) (Gamon et al. 2013) . JTT is an LDA-style model (Blei et al. 2003 ) that is trained jointly on source and target documents linked by browsing transitions. JTT generates a total of 150 features from its latent variables, 50 each for the source topic model, the target topic model and the transition model. The other semantic model of contrast is a manually defined one, which we use to assess the effectiveness of automatically learned models against human modelers. To this effect, we use the page categories that editors assign in Wikipedia as semantic features (WCAT). These features number in the multiple thousands. Using features such as WCAT is not a viable solution in general since Wikipedia categories are not available for all documents. As such, we use it solely as a point of comparison against DSSM and JTT.",
"cite_spans": [
{
"start": 912,
"end": 931,
"text": "Huang et al. (2013)",
"ref_id": "BIBREF24"
},
{
"start": 1202,
"end": 1221,
"text": "(Gamon et al. 2013)",
"ref_id": "BIBREF15"
},
{
"start": 1250,
"end": 1267,
"text": "(Blei et al. 2003",
"ref_id": "BIBREF3"
}
],
"ref_spans": [
{
"start": 20,
"end": 27,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Main Results",
"sec_num": "4.1"
},
{
"text": "We also distinguish between two types of learned rankers: those which draw their features only from the source (src only) document and those that draw their features from both the source and target (src+tar) documents. Although our task setting allows access to the content of both source and target documents, there are practical scenarios where a system should predict what interests the user without looking at the target document because the extra step of identifying a suitable target document for each candidate concept or entity of interest is computationally expensive.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Main Results",
"sec_num": "4.1"
},
{
"text": "As shown in Table 1 , NSF+DSSM, which incorporates our DSSM, is the overall best performing system across test sets. The task is hard as evidenced by the weak baseline scores. One reason is the large average number of candidates per page. On HEAD, we found an average of 170 anchors (of which 95 point to a unique target URL). For TORSO and TAIL, we found the average number of anchors to be 94 (52 unique targets) and 41 (19 unique targets), respectively.",
"cite_spans": [],
"ref_spans": [
{
"start": 12,
"end": 19,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Analysis of Results",
"sec_num": "4.2"
},
{
"text": "Clearly, the semantics of the documents form important signals for this task: WCAT, JTT, DSSM_BOW, and DSSM all significantly boost the performance over NSF alone. There are two interesting comparisons to consider: (a) manual semantics vs. learned semantics; and (b) deep semantic models vs. generative topic models. On (a), we observe somewhat surprisingly that the learned DSSM produces features that outperform the thousands of features coming from manually (editor) assigned Wikipedia category features (WCAT), in all but the TAIL where the two perform statistically the same. In contrast, features from the generative model (JTT) perform worse than WCAT across the board except on TAIL where JTT and WCAT are statistically tied. On (b), we observe that DSSM outperforms a stateof-the-art generative model (JTT) on HEAD and TORSO. On TAIL, they are statistically indistinguishable.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Analysis of Results",
"sec_num": "4.2"
},
{
"text": "We turn now to inspecting the scenario where features are only drawn from the source document (Rows 1-8 in Table 1 ). Again we observe that semantic features significantly boost the performance against NSF alone, however they significantly deteriorate when compared to using features from both source and target documents. In this scenario, the manual semantics from WCAT outperform all other models, but with a diminishing effect as we move from HEAD through TORSO to TAIL. DSSM is the best performing learned semantic model.",
"cite_spans": [],
"ref_spans": [
{
"start": 107,
"end": 114,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Analysis of Results",
"sec_num": "4.2"
},
{
"text": "Finally, we present the results to justify the two modifications we made to extend the model of Huang et al. (2013) to the DSSM, as described in Section 1. First, we see in Table 1 that DSSM_BOW, which has the same network structure of Huang et al.'s model, is much weaker than DSSM, demonstrating the benefits of using convolutional and max-pooling layers to extract semantic features for the highlighting task. Second, we conduct several experiments by using the cosine scores between the output layers of DSSM for and as features (following the procedure in Section 3.3 for using the DSSM as a direct implementation of ). We found that adding the cosine features to NSF+DSSM does not lead to any improvement. We also combined NSF with solely the cosine features from DSSM (i.e., without the other semantic features drawn from its output layers). But we still found no improvement over using NSF alone. Thus, we conclude that for this task it is much more effective to feed the features derived from DSSM to a supervised ranker than directly computing the interestingness score using cosine similarity in the learned semantic space, as in Huang et al. (2013) .",
"cite_spans": [
{
"start": 96,
"end": 115,
"text": "Huang et al. (2013)",
"ref_id": "BIBREF24"
},
{
"start": 1141,
"end": 1160,
"text": "Huang et al. (2013)",
"ref_id": "BIBREF24"
}
],
"ref_spans": [
{
"start": 173,
"end": 180,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Analysis of Results",
"sec_num": "4.2"
},
{
"text": "We construct the evaluation data set for this second task by randomly sampling a set of documents from a traffic-weighted set of Web documents. In a second step, we identify the entity names in each document using an in-house named entity recognizer. We issue each entity name as a query to a commercial search engine, and retain up to the top-100 retrieved documents as candidate target documents. We form for each entity a source document which consists of the entity text and its surrounding text defined by a 200-word window. We define the focus (as in Figure 1 ) in as the entity text, and the focus in as the first 10 tokens in .",
"cite_spans": [],
"ref_spans": [
{
"start": 557,
"end": 565,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Experiments on Entity Search",
"sec_num": "5"
},
{
"text": "The final evaluation data set contains 10,000 source documents. On average, each source document is associated with 87 target documents. Finally, the source-target document pairs are labeled in terms of interestingness by paid annotators. The label is on a 5-level scale, 0 to 4, with 4 meaning the target document is the most interesting to the source document and 0 meaning the target is of no interest.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments on Entity Search",
"sec_num": "5"
},
{
"text": "We test our models on two scenarios. The first is a ranking scenario where interesting documents are displayed to the user. Here, we select the top-ranked documents according to their interestingness scores. We measure the performance via NDCG at truncation levels 1 and 3. The second scenario is to display to the user all interesting results. In this scenario, we select all target documents with an interestingness score exceeding a predefined threshold. We evaluate this scenario using ROC analysis and, specifically, the area under the curve (AUC).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments on Entity Search",
"sec_num": "5"
},
{
"text": "The main results are summarized in Table 2 . Rows 1 to 6 are single model results, where each model is used as a direct implementation of the interestingness function . Rows 7 to 9 are ranker results, where is defined as a boosted tree based ranker that incorporates different sets of features extracted from source and target documents, including the features derived from single models. As in the highlighting experiments, all the machinelearned single models, including the DSSM, are trained on TRAIN_1, and all the rankers are trained on TRAIN_2. Table 2 ) is the classic document model (Robertson and Zaragoza 2009) . It uses the bag-of-words document representation and the BM25 term weighting function. In our setting, we define the interestingness score of a document pair as the dot product of their BM25weighted term vectors. To verify the importance of using contextual information, we compare two different ways of forming the term vector of a source document. The first only uses the entity text (Row 1). The second (Row 2) uses both the entity text and and its surrounding text in a 200word window (i.e., the entire source document).",
"cite_spans": [
{
"start": 591,
"end": 620,
"text": "(Robertson and Zaragoza 2009)",
"ref_id": "BIBREF34"
}
],
"ref_spans": [
{
"start": 35,
"end": 42,
"text": "Table 2",
"ref_id": "TABREF3"
},
{
"start": 551,
"end": 558,
"text": "Table 2",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Main Results",
"sec_num": "5.1"
},
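The BM25 baseline scores a pair as the dot product of BM25-weighted term vectors; the sketch below uses one common BM25 weighting with toy collection statistics, not necessarily the exact variant of Robertson and Zaragoza (2009).

```python
import math
from collections import Counter

def bm25_vector(doc_tokens, df, n_docs, avgdl, k1=1.2, b=0.75):
    """Map a token list to a {term: BM25 weight} vector (simplified weighting)."""
    tf, dl, vec = Counter(doc_tokens), len(doc_tokens), {}
    for term, f in tf.items():
        idf = math.log(1 + (n_docs - df.get(term, 0) + 0.5) / (df.get(term, 0) + 0.5))
        vec[term] = idf * f * (k1 + 1) / (f + k1 * (1 - b + b * dl / avgdl))
    return vec

def score(src_tokens, tgt_tokens, df, n_docs, avgdl):
    """Interestingness as the dot product of the two BM25-weighted term vectors."""
    vs = bm25_vector(src_tokens, df, n_docs, avgdl)
    vt = bm25_vector(tgt_tokens, df, n_docs, avgdl)
    return sum(w * vt.get(term, 0.0) for term, w in vs.items())

df = {"paul": 120, "simon": 150, "concert": 40, "senator": 30}  # toy document frequencies
print(score("paul simon concert tour".split(), "paul simon live concert".split(),
            df, n_docs=1000, avgdl=200))
```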
{
"text": "Results show that the model using contextual information is significantly better. Therefore, all the other models in this section use both the entity texts and their surrounding text.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
{
"text": "WTM (Row 3) is our implementation of the word translation model for IR (Berger and Lafferty 1999; Gao et al. 2010) . WTM defines the interestingness score as: where | is the unigram probability of word in , and | is the probability of translating into , trained on source-target document pairs using EM (Brown et al. 1993 ). The translation-based approach allows any pair of non-identical but semantically related words to have a nonzero matching score. As a result, it significantly outperforms BM25.",
"cite_spans": [
{
"start": 71,
"end": 97,
"text": "(Berger and Lafferty 1999;",
"ref_id": "BIBREF2"
},
{
"start": 98,
"end": 114,
"text": "Gao et al. 2010)",
"ref_id": "BIBREF16"
},
{
"start": 303,
"end": 321,
"text": "(Brown et al. 1993",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
{
"text": ", \u220f \u2211 | | \u2208 \u2208 , #",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
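Given a learned translation table P(w|v), the WTM score above can be computed directly (in log space to avoid underflow); the tiny translation table and the smoothing constant are illustrative.

```python
import math
from collections import Counter

def wtm_score(source_tokens, target_tokens, p_translate, eps=1e-6):
    """log score = sum_{w in t} log sum_{v in s} P(w|v) * P(v|s)."""
    counts = Counter(source_tokens)
    total = sum(counts.values())
    log_score = 0.0
    for w in target_tokens:
        p_w = sum(p_translate.get((w, v), eps) * counts[v] / total for v in counts)
        log_score += math.log(p_w)
    return log_score

p_translate = {("singer", "simon"): 0.3, ("concert", "tour"): 0.4,
               ("concert", "concert"): 0.5}               # toy P(w|v) translation table
print(wtm_score("paul simon tour".split(), "singer concert".split(), p_translate))
```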
{
"text": "BTLM (Row 4) follows the best performing bilingual topic model described in Gao et al. (2011) , which is an extension of PLSA (Hofmann 1999) . The model is trained on source-target document pairs using the EM algorithm with a constraint enforcing a source document and its target document to not only share the same prior topic distribution, but to also have similar fractions of words assigned to each topic. BLTM defines the interestingness score between s and t as:",
"cite_spans": [
{
"start": 76,
"end": 93,
"text": "Gao et al. (2011)",
"ref_id": "BIBREF18"
},
{
"start": 126,
"end": 140,
"text": "(Hofmann 1999)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
{
"text": ", \u220f \u2211 | | \u2208 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
{
"text": "The model assumes the following story of generating from . First, for each topic a word distribution is selected from a Dirichlet prior with concentration parameter . Second, given , a topic distribution is drawn from a Dirichlet prior with parameter . Finally, is generated word by word. Each word is generated by first selecting a topic according to , and then drawing a word from",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
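Given topic-word distributions and a source document's topic mixture (which BLTM estimates with EM; here they are hard-coded toy values), the scoring rule reads as follows.

```python
import math

def bltm_score(target_tokens, phi, theta_s, eps=1e-9):
    """log score = sum_{w in t} log sum_z P(w | phi_z) * P(z | theta_s)."""
    log_score = 0.0
    for w in target_tokens:
        p_w = sum(theta_s[z] * phi[z].get(w, eps) for z in range(len(theta_s)))
        log_score += math.log(p_w)
    return log_score

phi = [{"music": 0.4, "concert": 0.3, "singer": 0.3},     # toy topic 0: music
       {"senate": 0.5, "election": 0.3, "senator": 0.2}]  # toy topic 1: politics
theta_s = [0.8, 0.2]                                       # source document's topic mixture
print(bltm_score("concert singer music".split(), phi, theta_s))
```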
{
"text": ". We see that BLTM models interestingness by taking into account the semantic topic distribution of the entire documents. Our results in Table 2 show that BLTM outperforms WTM by a significant margin in both NDCG and AUC.",
"cite_spans": [],
"ref_spans": [
{
"start": 137,
"end": 144,
"text": "Table 2",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
{
"text": "DSSM (Row 5) outperforms all the competing single models, including the state-of-the-art topic model BLTM. Now, we inspect the difference between DSSM and BLTM in detail. Although both models strive to generate the semantic representation of a document, they use different modeling approaches. BLTM by nature is a generative model. The semantic representation in BLTM is a distribution of hidden semantic topics. Such a distribution is learned using Maximum Likelihood Estimation in an unsupervised manner, i.e., maximizing the log-likelihood of the source-target document pairs in the training data. On the other hand, DSSM represents documents as points in a hidden semantic space using a supervised learning method, i.e., paired documents are closer in that latent space than unpaired ones. We believe that the superior performance of DSSM is largely due to the fact that the model parameters are discriminatively trained using an objective that is tailored to the interestingness task.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
{
"text": "In addition to the difference in training methods, DSSM and BLTM also use different model structures. BLTM treats a document as a bag of words (thus losing some important contextual information such as word order and inter-word dependencies), and generates semantic representations of documents using linear projection. DSSM, on the other hand, treats text as a sequence of words and better captures local and global context, and generates highly non-linear semantic features via a deep neural network. To further verify our analysis, we inspect the results of a variant of DSSM, denoted as DSSM_BOW (Row 6), where the convolution and max-pooling layers are removed. This model treats a document as a bag of words, just like BLTM. These results demonstrate that the effectiveness of DSSM can also be attributed to the convolutional architecture in the neural network, in addition to being deep and being discriminative.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
{
"text": "We turn now to discussing the ranker results in Rows 7 to 9. The baseline ranker (Row 7) uses 158 features, including many counts and single model scores, such as BM25 and WMT. DSSM (Row 5) alone is quite effective, being close in performance to the baseline ranker with non-DSSM features. Integrating the DSSM score computed in (5) as one single feature into the ranker (Row 8) leads to a significant improvement over the baseline. The best performing combination (Row 9) is obtained by incorporating the DSSM feature vectors of source and target documents (i.e., 600 features in total) in the ranker.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
{
"text": "We thus conclude that on both tasks, automatic highlighting and contextual entity search, features drawn from the output layers of our deep semantic model result in significant gains after being added to a set of non-semantic features, and in comparison to other types of semantic models used in the past.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BM25 (Rows 1 and 2 in",
"sec_num": null
},
{
"text": "In addition to the notion of relevance as described in Section 1, related to interestingness is also the notion of salience (also called aboutness) (Gamon et al. 2013; 2014; Parajpe 2009; Yih et al. 2006) . Salience is the centrality of a term to the content of a document. Although salience and interestingness interact, the two are not the same. For example, in a news article about President Obama's visit to Seattle, Obama is salient, yet the average user would probably not be interested in learning more about Obama while reading that article.",
"cite_spans": [
{
"start": 148,
"end": 167,
"text": "(Gamon et al. 2013;",
"ref_id": "BIBREF15"
},
{
"start": 168,
"end": 173,
"text": "2014;",
"ref_id": "BIBREF28"
},
{
"start": 174,
"end": 187,
"text": "Parajpe 2009;",
"ref_id": null
},
{
"start": 188,
"end": 204,
"text": "Yih et al. 2006)",
"ref_id": "BIBREF40"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "6"
},
{
"text": "There are many systems that identify popular content in the Web or recommend content (e.g., Bandari et al. 2012; Lerman and Hogg 2010; Szabo and Huberman 2010) , which is closely related to the highlighting task. In contrast to these approaches, we strive to predict what term a user is likely to be interested in when reading content, which may or may not be the same as the most popular content that is related to the current document. It has empirically been demonstrated in Gamon et al. (2013) that popularity is in fact a rather poor predictor for interestingness. The task of contextual entity search, which is formulated as an information retrieval problem in this paper, is also related to research on entity resolution (Stefanidis et al. 2013) .",
"cite_spans": [
{
"start": 92,
"end": 112,
"text": "Bandari et al. 2012;",
"ref_id": "BIBREF0"
},
{
"start": 113,
"end": 134,
"text": "Lerman and Hogg 2010;",
"ref_id": "BIBREF27"
},
{
"start": 135,
"end": 159,
"text": "Szabo and Huberman 2010)",
"ref_id": "BIBREF38"
},
{
"start": 478,
"end": 497,
"text": "Gamon et al. (2013)",
"ref_id": "BIBREF15"
},
{
"start": 728,
"end": 752,
"text": "(Stefanidis et al. 2013)",
"ref_id": "BIBREF37"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "6"
},
{
"text": "Latent Semantic Analysis (Deerwester et al. 1990 ) is arguably the earliest semantic model designed for IR. Generative topic models widely used for IR include PLSA (Hofmann 1990 ) and LDA (Blei et al. 2003) . Recently, these models have been extended to handle cross-lingual cases, where there are pairs of corresponding documents in different languages (e.g., Dumais et al. 1997; Gao et al. 2011; Platt et al. 2010; Yih et al. 2011) .",
"cite_spans": [
{
"start": 25,
"end": 48,
"text": "(Deerwester et al. 1990",
"ref_id": "BIBREF9"
},
{
"start": 164,
"end": 177,
"text": "(Hofmann 1990",
"ref_id": null
},
{
"start": 188,
"end": 206,
"text": "(Blei et al. 2003)",
"ref_id": "BIBREF3"
},
{
"start": 361,
"end": 380,
"text": "Dumais et al. 1997;",
"ref_id": "BIBREF12"
},
{
"start": 381,
"end": 397,
"text": "Gao et al. 2011;",
"ref_id": "BIBREF18"
},
{
"start": 398,
"end": 416,
"text": "Platt et al. 2010;",
"ref_id": "BIBREF32"
},
{
"start": 417,
"end": 433,
"text": "Yih et al. 2011)",
"ref_id": "BIBREF41"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "6"
},
{
"text": "By exploiting deep architectures, deep learning techniques are able to automatically discover from training data the hidden structures and the associated features at different levels of abstraction useful for a variety of tasks (e.g., Collobert et al. 2011; Hinton et al. 2012; Socher et al. 2012; Krizhevsky et al., 2012; Gao et al. 2014) . Hinton and Salakhutdinov (2010) propose the most original approach based on an unsupervised version of the deep neural network to discover the hierarchical semantic structure embedded in queries and documents. Huang et al. (2013) significantly extends the approach so that the deep neural network can be trained on large-scale query-document pairs giving much better performance. The use of the convolutional neural network for text processing, central to our DSSM, was also described in Collobert et al. (2011) and Shen et al. (2014) but with very different applications. The DSSM described in Section 3 can be viewed as a variant of the deep neural network models used in these previous studies.",
"cite_spans": [
{
"start": 235,
"end": 257,
"text": "Collobert et al. 2011;",
"ref_id": "BIBREF8"
},
{
"start": 258,
"end": 277,
"text": "Hinton et al. 2012;",
"ref_id": "BIBREF21"
},
{
"start": 278,
"end": 297,
"text": "Socher et al. 2012;",
"ref_id": "BIBREF36"
},
{
"start": 298,
"end": 322,
"text": "Krizhevsky et al., 2012;",
"ref_id": "BIBREF26"
},
{
"start": 323,
"end": 339,
"text": "Gao et al. 2014)",
"ref_id": "BIBREF17"
},
{
"start": 342,
"end": 373,
"text": "Hinton and Salakhutdinov (2010)",
"ref_id": "BIBREF22"
},
{
"start": 552,
"end": 571,
"text": "Huang et al. (2013)",
"ref_id": "BIBREF24"
},
{
"start": 830,
"end": 853,
"text": "Collobert et al. (2011)",
"ref_id": "BIBREF8"
},
{
"start": 858,
"end": 876,
"text": "Shen et al. (2014)",
"ref_id": "BIBREF35"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "6"
},
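To make the architecture discussed above concrete, here is a hedged sketch (in PyTorch) of a convolutional-pooling text encoder whose outputs are compared with cosine similarity, in the spirit of the DSSM variant described in Section 3. The vocabulary size, embedding width, window size, and layer dimensions are illustrative assumptions, not the settings used in this paper.

```python
# Hedged sketch of a convolutional-pooling text encoder with a cosine
# similarity head, in the spirit of the DSSM discussed above. The vocabulary
# size, embedding width, and layer sizes are illustrative assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ConvSemanticEncoder(nn.Module):
    def __init__(self, vocab_size=30000, embed_dim=128, hidden_dim=300, out_dim=128):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        # Convolution over a sliding window of word positions, then max-pooling
        # over the whole sequence to obtain a fixed-length vector.
        self.conv = nn.Conv1d(embed_dim, hidden_dim, kernel_size=3, padding=1)
        self.proj = nn.Linear(hidden_dim, out_dim)

    def forward(self, token_ids):                      # (batch, seq_len)
        x = self.embed(token_ids).transpose(1, 2)      # (batch, embed_dim, seq_len)
        x = torch.tanh(self.conv(x))                   # (batch, hidden_dim, seq_len)
        x = x.max(dim=2).values                        # global max-pooling
        return torch.tanh(self.proj(x))                # latent semantic vector

encoder = ConvSemanticEncoder()
src = torch.randint(0, 30000, (4, 50))   # 4 source documents, 50 tokens each
tgt = torch.randint(0, 30000, (4, 50))   # 4 candidate target documents
sim = F.cosine_similarity(encoder(src), encoder(tgt), dim=1)  # interestingness-style score
print(sim.shape)  # torch.Size([4])
```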
{
"text": "Modeling interestingness is fundamental to many online recommendation systems. We obtain naturally occurring interest signals by observing Web browsing transitions where users click from one webpage to another. We propose to model this \"interestingness\" with a deep semantic similarity model (DSSM), based on deep neural networks with special convolutional-pooling structure, mapping source-target document pairs to feature vectors in a latent semantic space. We train the DSSM using browsing transitions between documents. Finally, we demonstrate the effectiveness of our model on two interestingness tasks: automatic highlighting and contextual entity search. Our results on large-scale, real-world datasets show that the semantics of documents computed by the DSSM are important for modeling interestingness and that the new model leads to significant improvements on both tasks. DSSM is shown to outperform not only the classic document models that do not use (latent) semantics but also stateof-the-art topic models that do not have the deep and convolutional architecture characterizing the DSSM.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "7"
},
{
"text": "One area of future work is to extend our method to model interestingness given an entire user session, which consists of a sequence of browsing events. We believe that the prior browsing and interaction history recorded in the session provides additional signals for predicting interestingness. To capture such signals, our model needs to be extended to adequately represent time series (e.g., causal relations and consequences of actions). One potentially effective model for such a purpose is based on the architecture of recurrent neural networks (e.g., Mikolov et al. 2010; Chen and Deng, 2014) , which can be incorporated into the deep semantic model proposed in this paper.",
"cite_spans": [
{
"start": 557,
"end": 577,
"text": "Mikolov et al. 2010;",
"ref_id": null
},
{
"start": 578,
"end": 598,
"text": "Chen and Deng, 2014)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "7"
},
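As a rough illustration of the future-work direction above, the sketch below uses a GRU to summarize a sequence of per-document latent vectors (such as those produced by the encoder sketched earlier) into a single session vector. The GRU choice and all dimensions are assumptions, not components of the model evaluated in this paper.

```python
# Hedged sketch: a recurrent network summarizing a browsing session as a
# sequence of per-document latent vectors; the final hidden state serves as
# a session representation. Dimensions and the GRU choice are assumptions.
import torch
import torch.nn as nn

session_docs = torch.randn(1, 6, 128)          # 1 session, 6 visited pages, 128-d doc vectors
session_rnn = nn.GRU(input_size=128, hidden_size=128, batch_first=True)
_, session_state = session_rnn(session_docs)   # final hidden state: (1, 1, 128)
session_vector = session_state.squeeze(0)      # could be combined with the current page's vector
print(session_vector.shape)                    # torch.Size([1, 128])
```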
{
"text": "Yelong Shen (Microsoft Research, One Microsoft Way, Redmond, WA 98052, USA, email: [email protected]).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Additional Authors",
"sec_num": null
},
{
"text": "In our experiments, we observed better results by sampling more negative training examples (e.g., up to 100) although this makes the training much slower. An alternative approach",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
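For concreteness, the sketch below shows one way the negative-sampling setup mentioned in this footnote can be expressed as a training objective: each source document is scored against its clicked target plus K sampled negatives, and a smoothed softmax over the cosine similarities is maximized for the clicked target. The value of K, the smoothing factor gamma, and the function name are assumptions for illustration, not the paper's exact formulation.

```python
# Hedged sketch of a softmax-over-cosines objective with K sampled negatives.
# K, gamma, and the function name are assumptions for illustration only.
import torch
import torch.nn.functional as F

def interestingness_loss(src_vec, pos_vec, neg_vecs, gamma=10.0):
    """src_vec: (batch, d); pos_vec: (batch, d); neg_vecs: (batch, K, d)."""
    pos_sim = F.cosine_similarity(src_vec, pos_vec, dim=1)                    # (batch,)
    neg_sim = F.cosine_similarity(src_vec.unsqueeze(1), neg_vecs, dim=2)      # (batch, K)
    logits = gamma * torch.cat([pos_sim.unsqueeze(1), neg_sim], dim=1)        # (batch, 1+K)
    # The clicked target always sits at index 0 of the candidate list.
    return F.cross_entropy(logits, torch.zeros(logits.size(0), dtype=torch.long))

batch, d, K = 8, 128, 100   # e.g., up to 100 sampled negatives, as in the footnote
loss = interestingness_loss(torch.randn(batch, d), torch.randn(batch, d), torch.randn(batch, K, d))
print(loss.item())
```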
],
"back_matter": [
{
"text": "The authors thank Johnson Apacible, Pradeep Chilakamarri, Edward Guo, Bernhard Kohlmeier, Xiaolong Li, Kevin Powell, Xinying Song and Ye-Yi Wang for their guidance and valuable discussions. We also thank the three anonymous reviewers for their comments.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "The pulse of news in social media: forecasting popularity",
"authors": [
{
"first": "R",
"middle": [],
"last": "Bandari",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Asur",
"suffix": ""
},
{
"first": "B",
"middle": [
"A"
],
"last": "Huberman",
"suffix": ""
}
],
"year": 2012,
"venue": "ICWSM",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bandari, R., Asur, S., and Huberman, B. A. 2012. The pulse of news in social media: forecasting popularity. In ICWSM.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Learning deep architectures for AI. Fundamental Trends in Machine Learning",
"authors": [
{
"first": "Y",
"middle": [],
"last": "Bengio",
"suffix": ""
}
],
"year": 2009,
"venue": "",
"volume": "2",
"issue": "",
"pages": "1--127",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bengio, Y., 2009. Learning deep architectures for AI. Fundamental Trends in Machine Learning, 2(1):1-127.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Information retrieval as statistical translation",
"authors": [
{
"first": "A",
"middle": [],
"last": "Berger",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Lafferty",
"suffix": ""
}
],
"year": 1999,
"venue": "SIGIR",
"volume": "",
"issue": "",
"pages": "222--229",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Berger, A., and Lafferty, J. 1999. Information re- trieval as statistical translation. In SIGIR, pp. 222-229.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Latent Dirichlet allocation",
"authors": [
{
"first": "D",
"middle": [
"M"
],
"last": "Blei",
"suffix": ""
},
{
"first": "A",
"middle": [
"Y"
],
"last": "Ng",
"suffix": ""
},
{
"first": "M",
"middle": [
"I"
],
"last": "Jordan",
"suffix": ""
}
],
"year": 2003,
"venue": "Journal of Machine Learning Research",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Blei, D. M., Ng, A. Y., and Jordan, M. J. 2003. Latent Dirichlet allocation. Journal of Machine Learning Research, 3.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "A semantic approach to contextual advertising",
"authors": [
{
"first": "A",
"middle": [],
"last": "Broder",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Fontoura",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Josifovski",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Riedel",
"suffix": ""
}
],
"year": 2007,
"venue": "SIGIR",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Broder, A., Fontoura, M., Josifovski, V., and Riedel, L. 2007. A semantic approach to contex- tual advertising. In SIGIR.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "The mathematics of statistical machine translation: parameter estimation",
"authors": [
{
"first": "P",
"middle": [
"F"
],
"last": "Brown",
"suffix": ""
},
{
"first": "S",
"middle": [
"A"
],
"last": "Della Pietra",
"suffix": ""
},
{
"first": "V",
"middle": [
"J"
],
"last": "Della Pietra",
"suffix": ""
},
{
"first": "R",
"middle": [
"L"
],
"last": "Mercer",
"suffix": ""
}
],
"year": 1993,
"venue": "Computational Linguistics",
"volume": "19",
"issue": "2",
"pages": "263--311",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Brown, P. F., Della Pietra, S. A., Della Pietra, V. J., and Mercer, R. L. 1993. The mathematics of statistical machine translation: parameter esti- mation. Computational Linguistics, 19(2):263- 311.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Learning to rank using gradient descent",
"authors": [
{
"first": "C",
"middle": [],
"last": "Burges",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Shaked",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Renshaw",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Lazier",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Deeds",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Hamilton",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Hullender",
"suffix": ""
}
],
"year": 2005,
"venue": "ICML",
"volume": "",
"issue": "",
"pages": "89--96",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Burges, C., Shaked, T., Renshaw, E., Lazier, A., Deeds, M., Hamilton, and Hullender, G. 2005. Learning to rank using gradient descent. In ICML, pp. 89-96.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "A primal-dual method for training recurrent neural networks constrained by the echo-state property",
"authors": [
{
"first": "J",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Deng",
"suffix": ""
}
],
"year": 2014,
"venue": "ICLR",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chen, J. and Deng, L. 2014. A primal-dual method for training recurrent neural networks con- strained by the echo-state property. In ICLR.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Natural language processing (almost) from scratch",
"authors": [
{
"first": "R",
"middle": [],
"last": "Collobert",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Weston",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Bottou",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Karlen",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Kavukcuoglu",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Kuksa",
"suffix": ""
}
],
"year": 2011,
"venue": "Journal of Machine Learning Research",
"volume": "12",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Collobert, R., Weston, J., Bottou, L., Karlen, M., Kavukcuoglu, K., and Kuksa, P., 2011. Natural language processing (almost) from scratch. Journal of Machine Learning Research, vol. 12.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Indexing by latent semantic analysis",
"authors": [
{
"first": "S",
"middle": [],
"last": "Deerwester",
"suffix": ""
},
{
"first": "S",
"middle": [
"T"
],
"last": "Dumais",
"suffix": ""
},
{
"first": "G",
"middle": [
"W"
],
"last": "Furnas",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Landauer",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Harshman",
"suffix": ""
}
],
"year": 1990,
"venue": "Journal of the American Society for Information Science",
"volume": "41",
"issue": "6",
"pages": "391--407",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Deerwester, S., Dumais, S. T., Furnas, G. W., Landauer, T., and Harshman, R. 1990. Indexing by latent semantic analysis. Journal of the American Society for Information Science, 41(6): 391-407",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "New types of deep neural network learning for speech recognition and related applications: An overview",
"authors": [
{
"first": "L",
"middle": [],
"last": "Deng",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Hinton",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Kingsbury",
"suffix": ""
}
],
"year": 2013,
"venue": "ICASSP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Deng, L., Hinton, G., and Kingsbury, B. 2013. New types of deep neural network learning for speech recognition and related applications: An overview. In ICASSP.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "A deep convolutional neural network using heterogeneous pooling for trading acoustic invariance with phonetic confusion",
"authors": [
{
"first": "L",
"middle": [],
"last": "Deng",
"suffix": ""
},
{
"first": "O",
"middle": [],
"last": "Abdel-Hamid",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Yu",
"suffix": ""
}
],
"year": 2013,
"venue": "ICASSP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Deng, L., Abdel-Hamid, O., and Yu, D., 2013a. A deep convolutional neural network using heter- ogeneous pooling for trading acoustic invari- ance with phonetic confusion. In ICASSP.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Automatic cross-linguistic information retrieval using latent semantic indexing",
"authors": [
{
"first": "S",
"middle": [
"T"
],
"last": "Dumais",
"suffix": ""
},
{
"first": "T",
"middle": [
"A"
],
"last": "Letsche",
"suffix": ""
},
{
"first": "M",
"middle": [
"L"
],
"last": "Littman",
"suffix": ""
},
{
"first": "T",
"middle": [
"K"
],
"last": "Landauer",
"suffix": ""
}
],
"year": 1997,
"venue": "AAAI-97 Spring Symposium Series: Cross-Language Text and Speech Retrieval",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dumais, S. T., Letsche, T. A., Littman, M. L., and Landauer, T. K. 1997. Automatic cross-linguis- tic information retrieval using latent semantic indexing. In AAAI-97 Spring Symposium Series: Cross-Language Text and Speech Retrieval.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Greedy function approximation: a gradient boosting machine",
"authors": [
{
"first": "J",
"middle": [
"H"
],
"last": "Friedman",
"suffix": ""
}
],
"year": 1999,
"venue": "Annals of Statistics",
"volume": "29",
"issue": "",
"pages": "1189--1232",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Friedman, J. H. 1999. Greedy function approxi- mation: a gradient boosting machine. Annals of Statistics, 29:1189-1232.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Predicting interesting things in text",
"authors": [
{
"first": "M",
"middle": [],
"last": "Gamon",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Mukherjee",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Pantel",
"suffix": ""
}
],
"year": 2014,
"venue": "COLING",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gamon, M., Mukherjee, A., Pantel, P. 2014. Pre- dicting interesting things in text. In COLING.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Identifying salient entities in web pages",
"authors": [
{
"first": "M",
"middle": [],
"last": "Gamon",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Yano",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "Song",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Apacible",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Pantel",
"suffix": ""
}
],
"year": 2013,
"venue": "CIKM",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gamon, M., Yano, T., Song, X., Apacible, J. and Pantel, P. 2013. Identifying salient entities in web pages. In CIKM.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Clickthroughbased translation models for web search: from word models to phrase models",
"authors": [
{
"first": "J",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "J-Y",
"middle": [],
"last": "Nie",
"suffix": ""
}
],
"year": 2010,
"venue": "CIKM",
"volume": "",
"issue": "",
"pages": "1139--1148",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gao, J., He, X., and Nie, J-Y. 2010. Clickthrough- based translation models for web search: from word models to phrase models. In CIKM. pp. 1139-1148.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Learning continuous phrase representations for translation modeling",
"authors": [
{
"first": "J",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Yih",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Deng",
"suffix": ""
}
],
"year": 2014,
"venue": "ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gao, J., He, X., Yih, W-t., and Deng, L. 2014. Learning continuous phrase representations for translation modeling. In ACL.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Clickthrough-based latent semantic models for web search",
"authors": [
{
"first": "J",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Toutanova",
"suffix": ""
},
{
"first": "W-T",
"middle": [],
"last": "Yih",
"suffix": ""
}
],
"year": 2011,
"venue": "SIGIR",
"volume": "",
"issue": "",
"pages": "675--684",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gao, J., Toutanova, K., Yih., W-T. 2011. Click- through-based latent semantic models for web search. In SIGIR. pp. 675-684.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Speech recognition with deep recurrent neural networks",
"authors": [
{
"first": "A",
"middle": [],
"last": "Graves",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Mohamed",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Hinton",
"suffix": ""
}
],
"year": 2013,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Graves, A., Mohamed, A., and Hinton, G. 2013. Speech recognition with deep recurrent neural networks. In ICASSP.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Noise-contrastive estimation: a new estimation principle for unnormalized statistical models",
"authors": [
{
"first": "M",
"middle": [],
"last": "Gutmann",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Hyvarinen",
"suffix": ""
}
],
"year": 2010,
"venue": "Proc. Int. Conf. on Artificial Intelligence and Statistics (AISTATS2010)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gutmann, M. and Hyvarinen, A. 2010. Noise-con- trastive estimation: a new estimation principle for unnormalized statistical models. In Proc. Int. Conf. on Artificial Intelligence and Statis- tics (AISTATS2010).",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Deep neural networks for acoustic modeling in speech recognition",
"authors": [
{
"first": "G",
"middle": [],
"last": "Hinton",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Deng",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Yu",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Dahl",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Mohamed",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Jaitly",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Senior",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Vanhoucke",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Sainath",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Kingsbury",
"suffix": ""
}
],
"year": 2012,
"venue": "IEEE Signal Processing Magazine",
"volume": "29",
"issue": "",
"pages": "82--97",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hinton, G., Deng, L., Yu, D., Dahl, G., Mohamed, A., Jaitly, N., Senior, A., Vanhoucke, V., Ngu- yen, P., Sainath, T., and Kingsbury, B., 2012. Deep neural networks for acoustic modeling in speech recognition. IEEE Signal Processing Magazine, 29:82-97.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Discovering binary codes for documents by learning deep generative models",
"authors": [
{
"first": "G",
"middle": [],
"last": "Hinton",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Salakhutdinov",
"suffix": ""
}
],
"year": 2010,
"venue": "Topics in Cognitive Science",
"volume": "",
"issue": "",
"pages": "1--18",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hinton, G., and Salakhutdinov, R., 2010. Discov- ering binary codes for documents by learning deep generative models. Topics in Cognitive Science, pp. 1-18.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Probabilistic latent semantic indexing",
"authors": [
{
"first": "T",
"middle": [],
"last": "Hofmann",
"suffix": ""
}
],
"year": 1999,
"venue": "SIGIR",
"volume": "",
"issue": "",
"pages": "50--57",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hofmann, T. 1999. Probabilistic latent semantic indexing. In SIGIR. pp. 50-57.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Learning deep structured semantic models for web search using clickthrough data",
"authors": [
{
"first": "P",
"middle": [],
"last": "Huang",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Deng",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Acero",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Heck",
"suffix": ""
}
],
"year": 2013,
"venue": "CIKM",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Huang, P., He, X., Gao, J., Deng, L., Acero, A., and Heck, L. 2013. Learning deep structured se- mantic models for web search using click- through data. In CIKM.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "IR evaluation methods for retrieving highly relevant documents",
"authors": [
{
"first": "K",
"middle": [],
"last": "Jarvelin",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Kekalainen",
"suffix": ""
}
],
"year": 2000,
"venue": "SIGIR",
"volume": "",
"issue": "",
"pages": "41--48",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jarvelin, K. and Kekalainen, J. 2000. IR evalua- tion methods for retrieving highly relevant doc- uments. In SIGIR. pp. 41-48.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "ImageNet classification with deep convolutional neural networks",
"authors": [
{
"first": "A",
"middle": [],
"last": "Krizhevsky",
"suffix": ""
},
{
"first": "I",
"middle": [],
"last": "Sutskever",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Hinton",
"suffix": ""
}
],
"year": 2012,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Krizhevsky, A., Sutskever, I. and Hinton, G. 2012. ImageNet classification with deep convo- lutional neural networks. In NIPS.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Using a model of social dynamics to predict popularity of news",
"authors": [
{
"first": "K",
"middle": [],
"last": "Lerman",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Hogg",
"suffix": ""
}
],
"year": 2010,
"venue": "WWW",
"volume": "",
"issue": "",
"pages": "621--630",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lerman, K., and Hogg, T. 2010. Using a model of social dynamics to predict popularity of news. In WWW. pp. 621-630.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Computer eyesight gets a lot more accurate",
"authors": [
{
"first": "J",
"middle": [],
"last": "Markoff",
"suffix": ""
}
],
"year": 2014,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Markoff, J. 2014. Computer eyesight gets a lot more accurate. In New York Times.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Recurrent neural network based language model",
"authors": [
{
"first": "M",
"middle": [],
"last": "Karafiat",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Burget",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Cernocky",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Khudanpur",
"suffix": ""
}
],
"year": 2010,
"venue": "INTERSPEECH",
"volume": "",
"issue": "",
"pages": "1045--1048",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karafiat, M., Burget, L., Cernocky, J., and Khudanpur, S. 2010. Recurrent neural network based language model. In INTERSPEECH. pp. 1045-1048.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "Learning document aboutness from implicit user feedback and document structure",
"authors": [
{
"first": "D",
"middle": [],
"last": "Paranjpe",
"suffix": ""
}
],
"year": 2009,
"venue": "CIKM",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Paranjpe, D. 2009. Learning document aboutness from implicit user feedback and document structure. In CIKM.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Translingual document representations from discriminative projections",
"authors": [
{
"first": "J",
"middle": [],
"last": "Platt",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Toutanova",
"suffix": ""
},
{
"first": "W",
"middle": [],
"last": "Yih",
"suffix": ""
}
],
"year": 2010,
"venue": "EMNLP",
"volume": "",
"issue": "",
"pages": "251--261",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Platt, J., Toutanova, K., and Yih, W. 2010. Translingual document representations from discriminative projections. In EMNLP. pp. 251- 261.",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "The probabilistic relevance framework: BM25 and beyond",
"authors": [
{
"first": "S",
"middle": [],
"last": "Robertson",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Zaragoza",
"suffix": ""
}
],
"year": 2009,
"venue": "Foundations and Trends in Information Retrieval",
"volume": "3",
"issue": "4",
"pages": "333--389",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Robertson, S., and Zaragoza, H. 2009. The proba- bilistic relevance framework: BM25 and be- yond. Foundations and Trends in Information Retrieval, 3(4):333-389.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "A latent semantic model with convolutional-pooling structure for information retrieval",
"authors": [
{
"first": "Y",
"middle": [],
"last": "Shen",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "",
"middle": [
"J"
],
"last": "Gao",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Deng",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Mesnil",
"suffix": ""
}
],
"year": 2014,
"venue": "CIKM",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Shen, Y., He, X., Gao. J., Deng, L., and Mesnil, G. 2014. A latent semantic model with convolu- tional-pooling structure for information re- trieval. In CIKM.",
"links": null
},
"BIBREF36": {
"ref_id": "b36",
"title": "Semantic compositionality through recursive matrix-vector spaces",
"authors": [
{
"first": "R",
"middle": [],
"last": "Socher",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Huval",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Manning",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Ng",
"suffix": ""
}
],
"year": 2012,
"venue": "EMNLP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Socher, R., Huval, B., Manning, C., Ng, A., 2012. Semantic compositionality through recursive matrix-vector spaces. In EMNLP.",
"links": null
},
"BIBREF37": {
"ref_id": "b37",
"title": "Entity resolution in the web of data. CIKM'13 Tutorial",
"authors": [
{
"first": "K",
"middle": [],
"last": "Stefanidis",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Efthymiou",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Herschel",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Christophides",
"suffix": ""
}
],
"year": 2013,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stefanidis, K., Efthymiou, V., Herschel, M., and Christophides, V. 2013. Entity resolution in the web of data. CIKM'13 Tutorial.",
"links": null
},
"BIBREF38": {
"ref_id": "b38",
"title": "Predicting the popularity of online content",
"authors": [
{
"first": "G",
"middle": [],
"last": "Szabo",
"suffix": ""
},
{
"first": "B",
"middle": [
"A"
],
"last": "Huberman",
"suffix": ""
}
],
"year": 2010,
"venue": "Communications of the ACM",
"volume": "53",
"issue": "8",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Szabo, G., and Huberman, B. A. 2010. Predicting the popularity of online content. Communica- tions of the ACM, 53(8).",
"links": null
},
"BIBREF39": {
"ref_id": "b39",
"title": "Adapting boosting for information retrieval measures",
"authors": [
{
"first": "Q",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "C",
"middle": [
"J C"
],
"last": "Burges",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Svore",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Gao",
"suffix": ""
}
],
"year": 2009,
"venue": "Journal of Information Retrieval",
"volume": "13",
"issue": "3",
"pages": "254--270",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wu, Q., Burges, C.J.C., Svore, K., and Gao, J. 2009. Adapting boosting for information re- trieval measures. Journal of Information Re- trieval, 13(3):254-270.",
"links": null
},
"BIBREF40": {
"ref_id": "b40",
"title": "Finding advertising keywords on web pages",
"authors": [
{
"first": "W",
"middle": [],
"last": "Yih",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Goodman",
"suffix": ""
},
{
"first": "V",
"middle": [
"R"
],
"last": "Carvalho",
"suffix": ""
}
],
"year": 2006,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yih, W., Goodman, J., and Carvalho, V. R. 2006. Finding advertising keywords on web pages. In WWW.",
"links": null
},
"BIBREF41": {
"ref_id": "b41",
"title": "Learning discriminative projections for text similarity measures",
"authors": [
{
"first": "W",
"middle": [],
"last": "Yih",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Toutanova",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Platt",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Meek",
"suffix": ""
}
],
"year": 2011,
"venue": "CoNLL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yih, W., Toutanova, K., Platt, J., and Meek, C. 2011. Learning discriminative projections for text similarity measures. In CoNLL.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"text": "Illustration of the network architecture and information flow of the DSSM useful to the corresponding tasks, with a manageable vector size.",
"num": null,
"type_str": "figure"
},
"FIGREF1": {
"uris": null,
"text": "A sample of document snippets and the keywords (in bold) detected by the DSSM.",
"num": null,
"type_str": "figure"
},
"TABREF1": {
"type_str": "table",
"content": "<table><tr><td>task performance (NDCG @ K) of interest models over HEAD, TORSO and</td></tr><tr><td>TAIL test sets. Bold indicates statistical significance over all non-shaded results using t-test (</td></tr><tr><td>0.05).</td></tr></table>",
"text": "Highlighting",
"num": null,
"html": null
},
"TABREF3": {
"type_str": "table",
"content": "<table><tr><td>entity search task perfor-</td></tr><tr><td>mance (NDCG @ K and AUC). * indicates sta-</td></tr><tr><td>tistical significance over all non-shaded single</td></tr><tr><td>model results (Rows 1 to 6) using t-test (</td></tr><tr><td>0.05). # indicates statistical significance over re-</td></tr><tr><td>sults in Row 7. ## indicates statistical signifi-</td></tr><tr><td>cance over results in Rows 7 and 8.</td></tr></table>",
"text": "Contextual",
"num": null,
"html": null
}
}
}
}