|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:54:29.876723Z" |
|
}, |
|
"title": "A Neural Generative Model for Joint Learning Topics and Topic-Specific Word Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Lixing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Warwick", |
|
"location": { |
|
"country": "UK" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yulan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Warwick", |
|
"location": { |
|
"country": "UK" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Deyu", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Laboratory of Computer Network and Information Integration", |
|
"institution": "Southeast University", |
|
"location": { |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We propose a novel generative model to explore both local and global context for joint learning topics and topic-specific word embeddings. In particular, we assume that global latent topics are shared across documents, a word is generated by a hidden semantic vector encoding its contextual semantic meaning, and its context words are generated conditional on both the hidden semantic vector and global latent topics. Topics are trained jointly with the word embeddings. The trained model maps words to topic-dependent embeddings, which naturally addresses the issue of word polysemy. Experimental results show that the proposed model outperforms the word-level embedding methods in both word similarity evaluation and word sense disambiguation. Furthermore, the model also extracts more coherent topics compared with existing neural topic models or other models for joint learning of topics and word embeddings. Finally, the model can be easily integrated with existing deep contextualized word embedding learning methods to further improve the performance of downstream tasks such as sentiment classification.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We propose a novel generative model to explore both local and global context for joint learning topics and topic-specific word embeddings. In particular, we assume that global latent topics are shared across documents, a word is generated by a hidden semantic vector encoding its contextual semantic meaning, and its context words are generated conditional on both the hidden semantic vector and global latent topics. Topics are trained jointly with the word embeddings. The trained model maps words to topic-dependent embeddings, which naturally addresses the issue of word polysemy. Experimental results show that the proposed model outperforms the word-level embedding methods in both word similarity evaluation and word sense disambiguation. Furthermore, the model also extracts more coherent topics compared with existing neural topic models or other models for joint learning of topics and word embeddings. Finally, the model can be easily integrated with existing deep contextualized word embedding learning methods to further improve the performance of downstream tasks such as sentiment classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Probabilistic topic models assume that words are generated from latent topics that can be inferred from word co-occurrence patterns taking a document as global context. In recent years, various neural topic models have been proposed. Some of them are built on the Variational Auto-Encoder (VAE) (Kingma and Welling, 2014), which utilizes deep neural networks to approximate the in-tractable posterior distribution of observed words given latent topics (Miao et al., 2016; Srivastava and Sutton, 2017; Bouchacourt et al., 2018) . However, these models take the bag-of-words (BOWs) representation of a given document as the input to the VAE and aim to learn hidden topics that can be used to reconstruct the original document. They do not learn word embeddings concurrently.", |
|
"cite_spans": [ |
|
{ |
|
"start": 452, |
|
"end": 471, |
|
"text": "(Miao et al., 2016;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 472, |
|
"end": 500, |
|
"text": "Srivastava and Sutton, 2017;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 501, |
|
"end": 526, |
|
"text": "Bouchacourt et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Other topic modeling approaches explore the pre-trained word embeddings for the extraction of more semantically coherent topics since word embeddings capture syntactic and semantic regularities by encoding the local context of word co-occurrence patterns. For example, the topicword generation process in the traditional topic models can be replaced by generating word embeddings given latent topics (Das et al., 2015) or by a two-component mixture of a Dirichlet multinomial component and a word embedding component (Nguyen et al., 2015) . Alternatively, the information derived from word embeddings can be used to promote semantically related words in the Polya Urn sampling process of topic models (Li et al., 2017) or generate topic hierarchies (Zhao et al., 2018) . However, all these models use pretrained word embeddings and do not learn word embeddings jointly with topics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 400, |
|
"end": 418, |
|
"text": "(Das et al., 2015)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 517, |
|
"end": 538, |
|
"text": "(Nguyen et al., 2015)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 701, |
|
"end": 718, |
|
"text": "(Li et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 749, |
|
"end": 768, |
|
"text": "(Zhao et al., 2018)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Word embeddings could improve the topic modeling results, but conversely, the topic information could also benefit word embedding learning. Early word embedding learning methods (Mikolov et al., 2013a ) learn a mapping function to project a word to a single vector in an embedding space. Such one-to-one mapping cannot deal with word polysemy, as a word could have multiple meanings depending on its context. For example, the word 'patient' has two possible meanings 'enduring trying circumstances with even temper' and 'a person who requires medical care'. When analyzing reviews about restaurants and health services, the semantic meaning of 'patient' could be inferred depending on which topic it is associated with. One solution is to first extract topics using the standard latent Dirichlet allocation (LDA) model and then incorporate the topical information into word embedding learning by treating each topic as a pseudo-word (Liu et al., 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 200, |
|
"text": "(Mikolov et al., 2013a", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 933, |
|
"end": 951, |
|
"text": "(Liu et al., 2015)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Whereas the aforementioned approaches adopt a two-step process, by either using pre-trained word embeddings to improve the topic extraction results in topic modeling, or incorporating topics extracted using a standard topic model into word embedding learning, Shi et al. (2017) developed a Skip-Gram based model to jointly learn topics and word embeddings based on the Probabilistic Latent Semantic Analysis (PLSA), where each word is associated with two matrices rather than a vector to induce topic-dependent embeddings. This is a rather cumbersome setup. Foulds (2018) used the Skip-Gram to imitate the probabilistic topic model that each word is represented as an importance vector over topics for context generation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 260, |
|
"end": 277, |
|
"text": "Shi et al. (2017)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 571, |
|
"text": "Foulds (2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a neural generative model built on VAE, called the Joint Topic Wordembedding (JTW) model, for jointly learning topics and topic-specific word embeddings. More concretely, we introduce topics as tangible parameters that are shared across all the context windows. We assume that the pivot word is generated by the hidden semantics encoding the local context where it occurred. Then the hidden semantics is transformed to a topical distribution taking into account the global topics, and this enables the generation of context words. Our rationale is that the context words are generated by the hidden semantics of the pivot word together with a global topic matrix, which captures the notion that the word has multiple meanings that should be shared across the corpus. We are thus able to learn topics and generate topic-dependent word embeddings jointly. The results of our model also allow the visualization of word semantics because topics can be visualized via the top words and words can be encoded as distributions over the topics 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1 Our source code is made available at http:// github.com/somethingx02/topical_wordvec_ models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In summary, our contribution is three-fold:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a novel Joint Topic Wordembedding (JTW) model built on VAE, for jointly learning topics and topic-specific word embeddings;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We perform extensive experiments and show that JTW outperforms other Skip-Grams or Bayesian alternatives in both word similarity evaluation and word sense disambiguation tasks, and can extract semantically more coherent topics from data;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We also show that JTW can be easily integrated with existing deep contextualized word embedding learning models to further improve the performance of downstream tasks such as sentiment classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our work is related to two lines of research:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Skip-Gram approaches for word embedding learning. The Skip-Gram, also known as WORD2VEC (Mikolov et al., 2013b) , maximizes the probability of the context words w n given a centroid word x n . Pennington et al. (2014) pointed out that Skip-Gram neglects the global word cooccurrence statistics. They thus formulated the Skip-Gram as a non-negative matrix factorization (NMF) with the cross-entropy loss switched to the least square error. Another NMF-based method was proposed by Xu et al. (2018) , in which the Euclidean distance was substituted with Wasserstein distance. Jameel and Schockaert (2019) rewrote the NMF objective as a cumulative product of normal distributions, in which each factor is multiplied by a von Mises-Fisher (vMF) distribution of context word vectors, to hopefully cluster the context words since the vMF density retains the cosine similarity. Although the Skip-Gram-based methods attracted extensive attention, they were criticized for their inability to capture polysemy (Pilehvar and Collier, 2016) . A pioneered solution to this problem is the Multiple-Sense Skip-Gram model (Neelakantan et al., 2014) , where word vectors in a context are first averaged then clustered with other contexts to obtain a sense representation for the pivot word. In the same vein, Iacobacci and Navigli (2019) leveraged sense tags annotated by BabelNet (Navigli and Ponzetto, 2012) to jointly learn word and sense representations in the Skip-Gram manner that the context words are parameterized via a shared look-up table and sent to a BiLSTM to match the pivot word vector.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 111, |
|
"text": "(Mikolov et al., 2013b)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 217, |
|
"text": "Pennington et al. (2014)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 496, |
|
"text": "Xu et al. (2018)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 574, |
|
"end": 602, |
|
"text": "Jameel and Schockaert (2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1000, |
|
"end": 1028, |
|
"text": "(Pilehvar and Collier, 2016)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1106, |
|
"end": 1132, |
|
"text": "(Neelakantan et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1292, |
|
"end": 1320, |
|
"text": "Iacobacci and Navigli (2019)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1364, |
|
"end": 1392, |
|
"text": "(Navigli and Ponzetto, 2012)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "There have also been Bayesian extensions of the Skip-Gram models for word embedding learning. Barkan (2017) inherited the probabilistic generative line while extending the Skip-Gram by placing a Gaussian prior on the parameterized word vectors. The parameters were estimated via variational inference. In a similar vein, Rios et al. (2018) proposed to generate words in bilingual parallel sentences by shared hidden semantics. They introduced a latent index variable to align the hidden semantics of a word in the source language to its equivalence in the target language. More recently, Bra\u017einskas et al. (2018) proposed the Bayesian Skip-Gram (BSG) model, in which each word type with its related word senses collapsed is associated with a 'prior' or static embedding and then, depending on the context, the representation of each word is updated by 'posterior' or dynamic embedding. Through Bayesian modeling, BSG is able to learn context-dependent word embeddings. It does not explicitly model topics, however. In our proposed JTW, global topics are shared among all documents and learned from data. Also, whereas BSG only models the generation of context words given a pivot word, JTW explicitly models the generation of both the pivot word and the context words with different generative routes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 107, |
|
"text": "Barkan (2017)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 339, |
|
"text": "Rios et al. (2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 588, |
|
"end": 612, |
|
"text": "Bra\u017einskas et al. (2018)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Combining word embeddings with topic modeling. Pre-trained word embeddings can be used to improve the topic modeling performance. For example, Das et al. (2015) proposed the Gaussian LDA model, which, instead of generating discrete word tokens given latent topics, generates draws from a multivariate Gaussian of word embeddings. Nguyen et al. (2015) also replaced the topic-word Dirichlet multinomial component in traditional topic models, but by a two-component mixture of a Dirichlet multinomial component and a word embedding component. Li et al. (2017) proposed to modify the Polya Urn sampling process of the LDA model by promoting semantically related words obtained from word embeddings. More recently, Zhao et al. (2018) proposed to adapt a multi-layer Gamma Belief Network to generate topic hierarchies and also fine-grained interpretation of local topics, both of which are informed by word embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 160, |
|
"text": "Das et al. (2015)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 557, |
|
"text": "Li et al. (2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 711, |
|
"end": 729, |
|
"text": "Zhao et al. (2018)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Instead of using word embeddings for topic modeling, Liu et al. (2015) proposed the Topical Word Embedding model, which incorporates the topical information derived from standard topic models into word embedding learning by treating each topic as a pseudo-word. Briakou et al. (2019) followed this route and proposed a four-stage model in which topics were first extracted from a corpus by LDA and then the topic-based word embeddings are mapped to a shared space using anchor words that were retrieved from the WordNet.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 70, |
|
"text": "Liu et al. (2015)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 283, |
|
"text": "Briakou et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "There are also approaches proposed to jointly learn topics and word embeddings built on Skip-Gram models. Shi et al. (2017) developed a Skip-Gram Topical word Embedding (STE) model built on PLSA where each word is associated with two matrices-one matrix used when the word is a pivot word and another used when the word is considered as a context word. Expectation-Maximization is used to estimate model parameters. Foulds (2018) proposed the Mixed-Membership Skip-Gram model (MMSG), which assumes a topic is drawn for each context and the word in the context is drawn from the logbilinear model based on the topic embeddings. Foulds trained their model by alternating between Gibbs sampling and noise-contrastive estimation. MMSG only models the generation of context words, but not pivot words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 123, |
|
"text": "Shi et al. (2017)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 416, |
|
"end": 429, |
|
"text": "Foulds (2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Whereas our proposed JTW also resembles the similarity to the Skip-Gram model in that it predicts the context word given the pivot word, it is different from the existing approaches in that it assumes global latent topics shared across all documents and the generation of the pivot word and the context words follows different generative routes. Moreover, it is built on VAE and is trained using neural networks for more efficient parameter inference. Following the problem setup in the Skip-Gram model, we consider a pivot word x n and its context window w n = w n,1:C . We assume there are a total of N pivot word tokens and each context window contains C context words. However, as opposed to Skip-Gram, we do not compute the joint probability as a product chain of conditional probabilities of the context word given the pivot. Instead, in our model, context words are represented as BOWs for each context window by assuming the exchangeability of context words within the local context window.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We hypothesize that the hidden semantic vector z n of each word x n induces a topical distribution that is combined with the global corpus-wide latent topics to generate context words. Topics are represented as a probability matrix where each row is a multinomial distribution measuring the importance of each word within a topic. The hidden semantics z n of the pivot word x n is transformed to a topical distribution \u03b6 n , which participates in the generation of context words. Our assumption is that each word embodies a finite set of meanings that can be interpreted as topics, thus each word representation can be transformed to a distribution over topics. Context words are generated by first selecting a topic and then sampled according to the corresponding multinomial distribution. This enables a quick understanding of word semantics through the topical distribution and at the same time learning the latent topics from the corpus. The generative process is given below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 For each word position n \u2208 {1, 2, 3, . . . , N }:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "-Draw hidden semantic representation z n \u223c N (0, I) -Choose a pivot word x n \u223c p(x n |z n ) -Transform z n to \u03b6 n with a multi-layered perceptron: \u03b6 n = MLP (z n ) -For each context word position c \u2208 {1, 2, 3, . . . , C}: * Choose a topic indicator t n,c \u223c Categorical (\u03b6 n ) * Choose a context word w n,c \u223c p(w n,c |\u03b2 t n,c )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
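
{

"text": "To make the generative story above concrete, the following minimal sketch (Python/NumPy) samples one pivot word and its context window; the names generate_window, mlp, M_x, b_x and beta are our own illustrative assumptions rather than the authors' released implementation.\n\nimport numpy as np\n\ndef softmax(a):\n    e = np.exp(a - a.max())\n    return e / e.sum()\n\n# M_x is a V x D matrix, b_x a length-V vector, beta a K x V matrix whose rows sum to 1,\n# and mlp maps a D-dimensional z to a K-dimensional topic distribution (all hypothetical).\ndef generate_window(M_x, b_x, mlp, beta, C, rng):\n    V, D = M_x.shape\n    z = rng.standard_normal(D)                                   # z_n ~ N(0, I)\n    x = rng.choice(V, p=softmax(M_x @ z + b_x))                  # pivot word x_n ~ p(x_n | z_n)\n    zeta = mlp(z)                                                # zeta_n = MLP(z_n)\n    topics = [rng.choice(len(zeta), p=zeta) for _ in range(C)]   # t_{n,c} ~ Categorical(zeta_n)\n    context = [rng.choice(V, p=beta[t]) for t in topics]         # w_{n,c} ~ Categorical(beta_{t_{n,c}})\n    return x, context",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Related Work",

"sec_num": "2"

},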
|
{ |
|
"text": "Here, all the distributions are functions approximated by neural networks, e.g.,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "p(x n |z n ) \u221d exp (M x z n + b x )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": ", which will be discussed in more details in the Decoder section, t n,c indexes a row \u03b2 t n,c in the topic matrix. We could implicitly marginalize out the topic indicators, in which case the probability of a word would be written", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "as w n,c |\u03b6 n , \u03b2 \u223c Categorical (\u03c3(\u03b2 T \u03b6 n )), where \u03c3(\u2022)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "denotes the softmax function. The prior distribution for z n is a multivariate Gaussian distribution with the mean 0 and covariance I, of which the posterior indicates the hidden semantics of the pivot word when conditioned on {x n , w n }.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Although both JTW and BSG assume that a word can have multiple senses and use a latent embedding z to represent the hidden semantic meaning of each pivot word, there are some key differences in their generative processes. JTW first draws a latent embedding z from a standard Gaussian prior that is deterministically transformed into topic distributions and a distribution over pivot words. The pivot word is conditionally independent of its context given the latent embedding. At the same time, each context word is assigned a latent topic, drawn from a shared topic distribution which leverages the global topic information, and then drawn independently of one another. In BSG the latent embedding z is also drawn from a Gaussian prior but the context words are generated directly from the latent embedding z, as opposed to via a mixture model as in JTW. Therefore, JTW is able to group semantically similar words into topics, which is not the case in BSG.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Given the observed variables {x 1:N , w 1:N }, the objective of the model is to infer the posterior p(z|x, w). This is achieved by the VAE framework. As illustrated in Figure 1 , the JTW model is composed of an encoder and a decoder, each of which is constructed by neural networks. The family of distributions to approximate the posterior is Gaussian, in which \u00b5 n and \u03c3 n are optimized. As in VAE, we optimize \u00b5 n and \u03c3 n through the training of parameters in neural networks (e.g., we optimize", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 176, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "M \u03c0 in \u00b5 n = M T \u03c0 \u03c0 n + b \u03c0 instead of updating \u00b5 n directly).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The VAE naturally simulates the variational inference (Jordan et al., 1999) , where a family of parameterized distributions q \u03c6 (z n |x n , w n ) are optimized to approximate the intractable true posterior p \u03b8 (z n |x n , w n ). This is achieved by minimizing the Kullback-Leibler (KL) divergence between the variational distribution and the true posterior for each data point:", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 75, |
|
"text": "(Jordan et al., 1999)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ELBO", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "KL(q \u03c6 (z n |x n , w n )||p \u03b8 (z n |x n , w n )) = log p \u03b8 (x n , w n )\u2212E q \u03c6 [log p \u03b8 (z n , x n , w n ) \u2212 log q \u03c6 (z n |x n , w n )],", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "ELBO", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where the expectation term is called the Evidence Lower Bound (ELBO), denoted as L(\u03b8, \u03c6; x n , w n ). VAE optimizes ELBO to presumably minimize the KL-divergence. The ELBO is further derived as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ELBO", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L(\u03b8, \u03c6; x n , w n ) = E q \u03c6 (z n |x n ,w n ) [log p \u03b8 (x n , w n |z n )] \u2212 KL(q \u03c6 (z n |x n , w n )||p(z n )).", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "ELBO", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The first term on the left-hand side of Equation 2, which is an expectation with respect to q \u03c6 (z n |x n , w n ), can be estimated by sampling due to its intractability. That is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ELBO", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "E q \u03c6 (z n |x n ,w n ) [log p \u03b8 (x n , w n |z n )] \u2248 1 S S s=1 log p \u03b8 (x n , w n |z (s) n ),", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "ELBO", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ELBO", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "z (s) n \u223c q \u03c6 (z n |x n , w n ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ELBO", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Here we use z (s) n to represent the samples since the sampled distribution is related to x n .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ELBO", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The Encoder corresponds to q \u03c6 (z n |x n , w n ) in Equation 3. Recall that the variational family for approximating the true posterior is a Gaussian Distribution parameterized by {\u00b5 n , \u03c3 n }. As such, the encoder is essentially a set of neural functions mapping from observations to Gaussian parameters {\u00b5 n , \u03c3 n }. The neural functions are defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u03c0 n = MLP (x n , w n ), \u00b5 n = M T \u00b5 \u03c0 n + b \u03c0 , \u03c3 n = M T \u03c3 \u03c0 n +b \u03c3 ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where the MLP denotes the multi-layered perceptron and the context window w n is represented as a BOW that is a Vdimentional vector. The encoder outputs Gaussian parameters {\u00b5 n , \u03c3 n }, which constitutes the variational distribution q \u03c6 (z n |x n , w n ). In order to differentiate q \u03c6 (z n |x n , w n ) with respect to \u03c6, we apply the reparameterization trick (Kingma and Welling, 2014) by using the following transformation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "z (s) n = \u00b5 n + \u03c3 n \u2299 \u01eb (s) n \u01eb (s) n \u223c N (0, I).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder", |
|
"sec_num": "3.2" |
|
}, |
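
{

"text": "A hedged sketch of the encoder and the reparameterization step in Equation (4) is given below (Python/PyTorch); the class name, layer sizes, and the Tanh hidden layer are illustrative assumptions rather than the exact architecture.\n\nimport torch\nimport torch.nn as nn\n\nclass Encoder(nn.Module):\n    def __init__(self, vocab_size, hidden_dim, latent_dim):\n        super().__init__()\n        # pi_n = MLP(x_n, w_n): the one-hot pivot and the BOW context window are concatenated\n        self.mlp = nn.Sequential(nn.Linear(2 * vocab_size, hidden_dim), nn.Tanh())\n        self.to_mu = nn.Linear(hidden_dim, latent_dim)       # mu_n\n        self.to_logvar = nn.Linear(hidden_dim, latent_dim)   # parameterize log sigma^2 for stability\n\n    def forward(self, x_onehot, w_bow):\n        pi = self.mlp(torch.cat([x_onehot, w_bow], dim=-1))\n        mu, logvar = self.to_mu(pi), self.to_logvar(pi)\n        sigma = torch.exp(0.5 * logvar)\n        eps = torch.randn_like(sigma)                        # eps ~ N(0, I)\n        z = mu + sigma * eps                                 # reparameterization trick (Eq. 4)\n        return z, mu, logvar",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Encoder",

"sec_num": "3.2"

},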
|
{ |
|
"text": "The Decoder corresponds to", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "p \u03b8 (x n , w n |z (s) n ) in Equation (3). It is a neural function that maps the sample z (s) n to the distribution p \u03b8 (x p n , w p n |z (s)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "n ) with random variables instantiated by x n and w n . More concretely, we define two neural functions to generate the pivot word and the context words separately. Both the functions involve an MLP, while the context words are generated independently from each other by the topic mixture weighted by the hidden topic distributions. The neural functions are expressed as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(x p n |z (s) n ) \u221d exp (M x z (s) n + b x )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b6 (s) n = MLP (z (s) n ) (6) p(w p n,c |\u03b6 (s) n ) \u221d exp (\u03b2 T \u03b6 (s) n + b w )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In this case, the MLP for the pivot word is specified as a fully connected layer. Recall that we represent the context window w n as BOW, the instantiated probability p \u03b8 (x n , w n |z (s) n ) can be therefore derived as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p \u03b8 (x n , w n |z (s) n ) \u221d exp(M x z (s) n + b x )[x n ] V v=1 exp (\u03b2 T \u03b6 (s) n + b w )[v] w n [v]", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "exp (M x z (s) n + b x )[x n ] denotes the x n -th element of the vector exp (M x z (s) n + b x ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder", |
|
"sec_num": "3.3" |
|
}, |
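
{

"text": "The decoder defined by Equations (5)-(7) can be sketched as follows (Python/PyTorch); the class and attribute names, and the use of a single linear layer per component, are our own illustrative assumptions.\n\nimport torch\nimport torch.nn as nn\n\nclass Decoder(nn.Module):\n    def __init__(self, vocab_size, latent_dim, num_topics):\n        super().__init__()\n        self.pivot = nn.Linear(latent_dim, vocab_size)     # M_x z + b_x (Eq. 5)\n        self.to_topics = nn.Sequential(nn.Linear(latent_dim, num_topics), nn.Softmax(dim=-1))  # zeta = MLP(z) (Eq. 6)\n        self.context = nn.Linear(num_topics, vocab_size)   # beta^T zeta + b_w (Eq. 7)\n\n    def forward(self, z):\n        log_p_pivot = torch.log_softmax(self.pivot(z), dim=-1)          # log p(x_n | z)\n        zeta = self.to_topics(z)\n        log_p_context = torch.log_softmax(self.context(zeta), dim=-1)   # log p(w_{n,c} | zeta)\n        return log_p_pivot, log_p_context, zeta",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Decoder",

"sec_num": "3.3"

},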
|
{ |
|
"text": "We are now ready to compute ELBO in Equation 2with the specified", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "q \u03c6 (z n |x n , w n ) and p \u03b8 (x n , w n |z (s)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "n ) in hand. Our final objective function that needs to be maximized is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L(\u03b8, \u03c6; x n , w n ) = 1 S S s=1 log p \u03b8 (x n , w n |\u00b5 n + \u03c3 n \u2299 \u01eb (s) n ) \u2212 1 2 D d=1 1 + log \u03c3 n [d] 2 \u2212 \u00b5 n [d] 2 \u2212 \u03c3 n [d] 2 ,", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Loss Function", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where D denotes the dimension of \u00b5. S denotes the number of sample points required for the computation of the expectation term. The loss function is the negative of the objective function. The learning procedure is summarized in Algorithm 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Loss Function", |
|
"sec_num": "3.4" |
|
}, |
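
{

"text": "A minimal sketch of the per-window objective in Equation (9), negated to give a loss for gradient-based training, is shown below (Python/PyTorch); jtw_loss and its argument names are hypothetical, and the inputs are assumed to come from encoder and decoder sketches such as those given earlier.\n\nimport torch\n\ndef jtw_loss(log_p_pivot, log_p_context, x_idx, w_bow, mu, logvar):\n    # reconstruction term: log p(x_n | z) + sum_v w_n[v] * log p(v | zeta)\n    rec = log_p_pivot.gather(-1, x_idx.unsqueeze(-1)).squeeze(-1)\n    rec = rec + (w_bow * log_p_context).sum(-1)\n    # closed-form KL(q_phi(z_n | x_n, w_n) || N(0, I))\n    kl = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(-1)\n    return (kl - rec).mean()   # minimize the negative ELBO",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Loss Function",

"sec_num": "3.4"

},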
|
{ |
|
"text": "After training, we are able to map the words to their respective representations using the Encoder part of JTW. The Encoder takes a pivot word together with its context window as an input and outputs the parameters of the variational distribution considered to be the approximated posterior q \u03c6 (z|x n , w n ), which is a Gaussian distribution in our case. The word representations are Gaussian parameters {\u00b5 n , \u03c3 n }. Because the output of the Encoder is formulated as a Gaussian distribution, the word similarity of two words can be either computed by the KL-divergence between the Gaussian distributions, or by the cosine similarity between their means. We use the Gaussian mean \u00b5 to represent a word given Shuffle dataset", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prediction", |
|
"sec_num": "3.5" |
|
}, |
|
{

"text": "Algorithm 1 (learning procedure): shuffle the dataset x_{1:N}, w_{1:N}; for each of the N_B mini-batches, generate S samples ε^(s) ∼ N(0, I), compute the gradient g ← ∇_{θ,φ} L(θ, φ; x_B, w_B) according to Equation (9), and update the parameters θ, φ using g; after each iteration, set i ← i + 1 and decay the learning rate η ← η × lrDecay; finally, return θ, φ.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Prediction",

"sec_num": "3.5"

},
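
{

"text": "The following skeleton illustrates Algorithm 1 (Python/PyTorch); the component names encoder, decoder and jtw_loss refer to the earlier sketches and are assumptions, as are the Adam optimizer and the exponential schedule used to mimic η ← η × lrDecay.\n\nimport torch\n\ndef train(encoder, decoder, loader, epochs=1, lr=1e-3, lr_decay=0.99):\n    params = list(encoder.parameters()) + list(decoder.parameters())\n    opt = torch.optim.Adam(params, lr=lr)\n    sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=lr_decay)\n    for _ in range(epochs):\n        for x_onehot, x_idx, w_bow in loader:          # shuffled mini-batches of (pivot, context BOW)\n            z, mu, logvar = encoder(x_onehot, w_bow)   # one sample per data point (S = 1)\n            log_p_pivot, log_p_context, _ = decoder(z)\n            loss = jtw_loss(log_p_pivot, log_p_context, x_idx, w_bow, mu, logvar)\n            opt.zero_grad()\n            loss.backward()\n            opt.step()\n        sched.step()                                   # eta <- eta * lrDecay\n    return encoder, decoder",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Prediction",

"sec_num": "3.5"

},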
|
{ |
|
"text": "its context. The universal representation of a word type can be obtained by averaging the posterior means of all occurrences over the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prediction", |
|
"sec_num": "3.5" |
|
}, |
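
{

"text": "A small sketch of this aggregation step (Python/NumPy); universal_embeddings and the (word, mu) input format are illustrative assumptions.\n\nimport numpy as np\nfrom collections import defaultdict\n\ndef universal_embeddings(occurrences):\n    # occurrences: iterable of (word, mu) pairs, one per token, produced by the trained encoder\n    sums = defaultdict(lambda: None)\n    counts = defaultdict(int)\n    for word, mu in occurrences:\n        mu = np.asarray(mu, dtype=float)\n        sums[word] = mu if sums[word] is None else sums[word] + mu\n        counts[word] += 1\n    return {w: sums[w] / counts[w] for w in sums}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Prediction",

"sec_num": "3.5"

},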
|
{ |
|
"text": "Dataset. We train the proposed JTW model on the Yelp dataset, 2 which is a collection of more than 4 million reviews on over 140k business categories. Although the number of business categories is large, the vast majority of reviews falls into 5 business categories. The top Restaurant category consists of more than 40% of reviews. The next top 4 categories, Shopping, Beauty &Spas, Automotive, and Clinical, contain about 8%, 6%, 4%, and 3% of reviews, respectively. The Clinical documents are further filtered by business subcategories defined in Tran and Lee (2017) , which are recognized as core clinical businesses. This results in 176,733 documents for the Clinical category. Because the dataset is extremely imbalanced, simply training the model on the original dataset will likely overfit to the Restaurant category. We thus balance the dataset by sampling roughly an equal number of documents from each of the top 5 categories. The vocabulary size is set to 8,000. We use Mallet 3 to filter out stopwords. The final dataset consists of 865,616 documents with a total of 101,468,071 tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 550, |
|
"end": 569, |
|
"text": "Tran and Lee (2017)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Parameter Setting. The word semantics are represented as 100-dimensional vectors (i.e., D = 100), which is a default configuration for word representations (Mikolov et al., 2013a; Bra\u017einskas et al., 2018) . The number of latent topics is set to 50. It has been previously studied in Kingma and Welling (2014) that the number of samples per data point can be set to 1 if the batch size is large, (e.g., > 100). In our experiments, we set the batch size to 2,048 and the number of samples per data point, S, to 1. The context window size is set to 10. Network parameters (i.e., \u03b8, \u03c6) are all initialized by a normal distribution with zero mean and 0.1 variance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 179, |
|
"text": "(Mikolov et al., 2013a;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 204, |
|
"text": "Bra\u017einskas et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Baselines. We compare our model against four baselines:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 CvMF (Jameel and Schockaert, 2019) . CvMF can be viewed as an extension of GloVe that modifies the objective function by multiplying a mixture of vMFs, whose distance is measured by cosine similarity instead of euclidean distance. The mixture depicts the underlying semantics with which the words could be clustered.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 36, |
|
"text": "(Jameel and Schockaert, 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Bayesian Skip-Gram (BSG) (Bra\u017einskas et al., 2018) . BSG 4 is a probabilistic wordembedding method built on VAE as well, which achieved the state-of-art among other Bayesian word-embedding alternatives (Vilnis and McCallum, 2015; Barkan, 2017) . BSG infers the posterior or dynamic embedding given a pivot word and its observed context and is able to learn context-dependent word embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 52, |
|
"text": "(Bra\u017einskas et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 231, |
|
"text": "(Vilnis and McCallum, 2015;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 245, |
|
"text": "Barkan, 2017)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Skip-gram Topical word Embedding (STE) (Shi et al., 2017) . STE adapted the commonly known Skip-Gram by associating each word with an input matrix and an output matrix and used the Expectation-Maximization method with the negative sampling for model parameter inference. For topic generation, they need to evaluate the probability of p(w t+j |z, w t ) for each topic z and each skipgram < w t ; w t+j >, and represent each topic as the ranked list of bigrams.", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 59, |
|
"text": "(Shi et al., 2017)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Mixed Membership Skip-Gram (MMSG) (Foulds, 2018) . MMSG leverages mixed membership modeling in which words are assumed to be clustered into topics and the words in the context of a given pivot word are drawn from the log-bilinear model using the vector representations of the context-dependent topic. Model inference is performed using the Metropolis-Hastings-Walker algorithm with noise-contrastive estimation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 50, |
|
"text": "(Foulds, 2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Among the aforementioned baselines, CvMF and BSG only generate word embeddings and do not model topics explicitly. Also, CvMF only maps each word to a single word embedding whereas BSG can output context-dependent word embeddings. Both STE and MMSG can learn topics and topic-dependent embeddings at the same time. However, in STE the topic dependence is stored in the lines of word matrices and the word representations themselves are context independent. In contrast, MMSG associates each word with a topic distribution; it could produce contextualized word embeddings by summing up topic vectors weighed by the posterior topic distribution given a context. We probe into different topic counts and find the best setting for methods with topics or mixtures. In all the baselines, the dimensionality of word embeddings is tuned and finally set to 100.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We compare JTW with baselines on both word similarity and word-sense disambiguation tasks for the learned word embeddings, and also present the topic coherence and qualitative evaluation results for the extracted topics. Furthermore, we show that JTW can be easily integrated with deep contextualized word embeddings to further improve the performance of downstream tasks such as sentiment classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The word similarity task (Finkelstein et al., 2001) has been widely adopted to measure the quality of word embeddings. In the word similarity task, a number of pairwise words are given. Each pair of words should be assigned with a score that indicates their relatedness. The calculated scores are then compared with the golden scores by means of Spearman rank-order correlation coefficient. Because the word similarity task requires context-free word representations, we aggregate all the occurrences and obtain a universal vector for each word. The distance used for similarity scores is cosine similarity. For STE, we use AvgSimC following Shi et al. (2017) . We further make a comparison with the results of the Skip-Gram (SG) model, 5 which maps each word token to a single point in an Euclidean space without considering different senses of words. All the approaches are evaluated on the 7 commonly used benchmarking datasets. For JTW, we average the results over 10 runs and also report the standard deviations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 51, |
|
"text": "(Finkelstein et al., 2001)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 642, |
|
"end": 659, |
|
"text": "Shi et al. (2017)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Similarity", |
|
"sec_num": "5.1" |
|
}, |
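
{

"text": "A sketch of this evaluation protocol (Python, with scipy assumed for the rank correlation): each pair is scored by the cosine similarity of the universal embeddings and the scores are compared against the gold ratings with Spearman correlation.\n\nimport numpy as np\nfrom scipy.stats import spearmanr\n\ndef cosine(a, b):\n    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))\n\ndef evaluate_similarity(pairs, gold_scores, emb):\n    # pairs: list of (w1, w2); gold_scores: human relatedness ratings; emb: word -> vector\n    predicted = [cosine(emb[w1], emb[w2]) for w1, w2 in pairs]\n    return spearmanr(predicted, gold_scores).correlation",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Similarity",

"sec_num": "5.1"

},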
|
{ |
|
"text": "The results are reported in Table 1 . It can be observed that among the baselines, BSG achieves the lowest score on average, followed by MMSG. Although JTW clearly beats all the other models on SimLex-999 only, it only performs slightly worse than the top model in 5 out of the remaining 6 benchmarks. Overall, JTW gives superior results on average. A noticeable gap can be observed on the Stanford's Contextual Word Similarities (SCWS) dataset where JTW, MMSG, and BSG give better results compared with SG, CvMF, and STE. This can be explained by the fact that, in SCWS, golden scores are annotated together with the context. However, SG, CvMF, and STE can only produce context-independent word vectors. The results show the clear benefit of learning contextualized word vectors. Among the topicdependent word embeddings, JTW built on VAE appears to be more effective than the PLSAbased STE and the mixed membership model MMSG, achieving the best overall score when averaging the evaluation results across all the seven benchmarking datasets. The small standard deviation of JTW indicates that the performance is consistent across multiple runs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 35, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Similarity", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "While the word similarity tasks focus more on the general meaning of a word (since word pairs are presented without context), in this section, we turn to the lexical substitution task (Yuret, 2007; Thater et al., 2011) , which was designed to evaluate the word-embedding learning methods regarding their ability to disambiguate word senses. The lexical substitution task can be described by the following scenario: Given a sentence and one of its member words, find the most related replacement from a list of candidate words. As stated in Thater et al. (2011) , a good lexical substitution should not only capture the relatedness between the candidate word and the original word, but also imply the correctness with respect to the context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 197, |
|
"text": "(Yuret, 2007;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 218, |
|
"text": "Thater et al., 2011)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 540, |
|
"end": 560, |
|
"text": "Thater et al. (2011)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Substitution", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Following Bra\u017einskas et al. (2018) , we derive the setting from Melamud et al. (2015) to ensure a fair comparison between the context-free word embedding methods and the context-dependent ones. In detail, for JTW and BSG, we capture the context of a given word using the BOW representation, and derive the representation of each candidate word taken into account of the context. For CvMF and STE, the similarity score is computed using", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 34, |
|
"text": "Bra\u017einskas et al. (2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 64, |
|
"end": 85, |
|
"text": "Melamud et al. (2015)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Substitution", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "BalAdd (x, y) = C cos (y, x) + C c=1 cos (y, w c ) 2C ,", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Lexical Substitution", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "CvMF BSG STE MMSG JTW Accuracy 0.440 0.453 0.433 0.474 0.487 Table 2 : Accuracy on the lexical substitution task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 68, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "where y is the candidate word and x denotes the original word. For MMSG, the original word's representation is calculated as the sum of its associated topic vectors weighed by the word's posterior topical distribution. Given an original word and its context, we choose the candidate word with the highest similarity score. We compare the performance of various models on lexical substitution using the dataset from the SemEval 2007 task 10 6 (McCarthy and Navigli, 2007) , which consists of 1,688 instances. Because some words have multiple synonyms as annotated in the dataset, we would consider a chosen candidate word as a correct prediction if it hits one of the ground-truth replacements.", |
|
"cite_spans": [ |
|
{ |
|
"start": 456, |
|
"end": 470, |
|
"text": "Navigli, 2007)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
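
{

"text": "A small sketch of Equation (10) (Python/NumPy); bal_add and its argument names are illustrative assumptions, and the candidate with the highest score is then taken as the substitute.\n\nimport numpy as np\n\ndef cosine(a, b):\n    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))\n\ndef bal_add(x, y, context_vecs):\n    # x: original word vector, y: candidate word vector, context_vecs: the C context word vectors\n    C = len(context_vecs)\n    return (C * cosine(y, x) + sum(cosine(y, w) for w in context_vecs)) / (2 * C)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model",

"sec_num": null

},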
|
{ |
|
"text": "We report in Table 2 the accuracy scores of different methods. Context-sensitive word embeddings generally perform better than context-free alternatives. STE can only learn context-independent word embeddings and hence gives the lowest score. BSG is able to learn context-dependent word embeddings and outperforms CvMF. Among the joint topic and word embedding learning methods, STE performs the worst, showing that associating each word with two matrices and learning topic-dependent word embeddings based on PLSA appear to be less effective. Both JTW and MMSG show superior performances compared to BSG. JTW outperforms MMSG because JTW also models the generation of pivot word in addition to context words and the VAE framework for parameter inference is more effective than the annealed negative contrastive estimation used in MMSG.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 20, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Because only STE and MMSG can jointly learn topics and word embeddings among the baselines, we compare our proposed JTW with these two models in term of topic quality. The evaluation metric we employed is the topic coherence metric proposed in R\u00f6der et al. (2015) . The metric extracts co-occurrence counts of the topic words in Wikipedia using a sliding window of size 6 http://www.dianamccarthy.co.uk/ task10index.html. 110. For each top word a vector is calculated whose elements are the normalized pointwise mutual information between the word and every other top words. Given a topic, the arithmetic mean of all vector pairs' cosine similarity is treated as the coherence measure. We calculate the topic coherence score of each extracted topic based on its associated top ten words using Palmetto 7 (Rosner et al., 2014) . The topic coherence results with the topic number varying between 10 and 200 are plotted in Figure 2 . The graph shows that JTW scores the highest under all the topic settings. It gives the best coherence score of 0.416 at 50 topics, and gradually flattens with the increasing number of topics. MMSG exhibits an upward trend up to 100 topics, and drops to 0.365 when the topic number is set to 150. STE undergoes a gradual decrease and then stabilizes with the topic number beyond 150.", |
|
"cite_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 263, |
|
"text": "R\u00f6der et al. (2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 804, |
|
"end": 825, |
|
"text": "(Rosner et al., 2014)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 920, |
|
"end": 928, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Topic Coherence", |
|
"sec_num": "5.3" |
|
}, |
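
{

"text": "The coherence computation described above can be sketched as follows (Python/NumPy); p_w and p_joint are assumed to be word and word-pair probabilities pre-computed from sliding windows over a reference corpus, and the function names are our own.\n\nimport numpy as np\nfrom itertools import combinations\n\ndef npmi(w1, w2, p_w, p_joint, eps=1e-12):\n    if w1 == w2:\n        return 1.0\n    pj = max(p_joint.get((w1, w2), p_joint.get((w2, w1), 0.0)), eps)\n    pmi = np.log(pj / (p_w[w1] * p_w[w2]))\n    return pmi / -np.log(pj)\n\ndef coherence(top_words, p_w, p_joint):\n    # one NPMI vector per top word, then the mean cosine similarity over all vector pairs\n    vecs = {w: np.array([npmi(w, o, p_w, p_joint) for o in top_words]) for w in top_words}\n    sims = []\n    for a, b in combinations(top_words, 2):\n        va, vb = vecs[a], vecs[b]\n        sims.append(np.dot(va, vb) / (np.linalg.norm(va) * np.linalg.norm(vb) + 1e-12))\n    return float(np.mean(sims))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Topic Coherence",

"sec_num": "5.3"

},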
|
{ |
|
"text": "We present in Table 3 the example topics extracted by JTW and MMSG. It can be easily inferred from the top words generated by JTW that Topic 1 is related to 'Food', whereas Topic 5 is about the 'Clinical Service', which is identified by the words 'caring' and 'physician'. It can also be deduced from the top words that Topic 2, 3, and 4 represent 'Shopping', 'Beauty', and 'Automotive', respectively. In contrast, topics produced by MMSG contain more semantically less coherent words as highlighted by italics. For example, Topic 1 in MMSG contains words relating to both food and staff. This might be caused by the fact that, in MMSG, training is performed as a twostage process by first assigning topics to words using Gibbs sampling then estimating the topic vectors and word vectors from word cooccurrences and topic assignments via maximum likelihood estimator. This is equivalent to a topic model with parameterized word embeddings. Conversely, in JTW, latent variables in the generative process are recognized as word representations. Parameters reside in the generative network, and are inferred by the VAE. No extra parameters are introduced to encode the words. Therefore, the topics extracted tend to be more identifiable.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 21, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extracted Topics", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "The extracted topics allow the visualization of word semantics. In JTW, a word's semantic meanings can be interpreted as a distribution over the discovered latent topics. This is achieved by aggregating all the contextualized topical distribution of a particular word throughout the corpus. Meanwhile, when a word is placed under a specific context, its topical distribution can be directly transformed from its contextualized representation. We chose three words-'plastic', 'bar' and 'patient'-to illustrate the polysemous nature of them. To further demonstrate their context-dependent meanings, we also visualize the topic distribution of the following three sentences: (1) Effective patient care requires clinical knowledge and understanding of physical therapy;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualization of Word Semantics", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "(2) Restaurant servers require patient temperament;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualization of Word Semantics", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "(3) You have to bring your own bags or boxes but you can also purchase plastic bags. The topical distribution for the pivot words and the three example sentences are shown in Figure 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 183, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualization of Word Semantics", |
|
"sec_num": "5.5" |
|
}, |
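
{

"text": "A sketch of how such contextualized topic distributions could be read off a trained model (Python/PyTorch); encoder, decoder and decoder.to_topics refer to the earlier illustrative sketches and are assumptions rather than the released code.\n\nimport torch\n\ndef contextual_topics(encoder, decoder, x_onehot, w_bow):\n    # topic distribution of one pivot-word occurrence, given its one-hot pivot and BOW context\n    with torch.no_grad():\n        _, mu, _ = encoder(x_onehot, w_bow)   # use the posterior mean as the word representation\n        zeta = decoder.to_topics(mu)          # transform the representation into a topic distribution\n    return zeta",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Visualization of Word Semantics",

"sec_num": "5.5"

},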
|
{ |
|
"text": "We can deduce from the overall distributions that the semantic meaning of 'plastic' distributes almost equally on two topics, 'shopping' and Figure 3 : The overall topical distributions and contextualized topical distributions of the example words and the contextualized topical distribution of three example sentences. Note that the x-axis denotes the five example topics shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 149, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 389, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualization of Word Semantics", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "'beauty', while the meaning of 'bar' is more prominent on the 'food' and 'shopping' topics. 'Patient' has a strong connection with the 'clinical' topic, though it is also associated with the 'food' topic. When considering a specific context about the patient care, Sentence 1 has its topic distribution peaked at the 'clinical' topic. Sentence 2 also contains the word 'patient', but it now has its topic distribution peaked at 'food'. Sentence 3 mentioned 'plastic bags' and its most prominent topic is 'shopping'. These results show that JTW can indeed jointly learn latent topics and topic-specific word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualization of Word Semantics", |
|
"sec_num": "5.5" |
|
}, |
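
{

"text": "To make the aggregation step described above concrete, the following minimal Python sketch (not part of the original paper) sums and renormalizes the contextualized topic distributions of all occurrences of a word to obtain its overall topic distribution; the function name and the assumption that each occurrence is already given as a length-T probability vector are illustrative only.\n\nimport numpy as np\n\ndef overall_topic_distribution(contextual_dists):\n    # contextual_dists: array of shape (num_occurrences, T); each row is the\n    # contextualized distribution of one occurrence over the T latent topics\n    agg = np.asarray(contextual_dists).sum(axis=0)\n    return agg / agg.sum()  # renormalize to obtain the word-level distribution",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Visualization of Word Semantics",

"sec_num": "5.5"

},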
|
{ |
|
"text": "Recent advances in deep contextualized word representation learning have generated significant impact in natural language processing. Different from traditional word embedding learning methods such as Word2Vec or GloVe, where each word is mapped to a single vector representation, deep contextualized word representation learning methods are typically trained by language modeling and generate a different word vector for each word depending on the context in which it is used. A notable work is ELMo (Peters et al., 2018) , which is commonly regarded as the pioneer for deriving deep contextualized word embeddings (Devlin et al., 2019) . ELMo calculates the weighed sum of different layers of a multi-layered BiLSTM-based language model, using the normalized vector as a representation for the corresponding word. More recently, in contrast to ELMo, BERT (Devlin et al., 2019) was proposed to apply the bidirectional training of Transformer to masked language modelling. Because of its capability of effectively encoding contextualized knowledge from huge external corpora in word embeddings, BERT has refreshed the state-of-art results on a number of NLP tasks. While Word2Vec/GloVe and ELMo/BERT represent the two opposite extremes in word embedding learning, with the former learning a single vector representation for each word and the latter learning a separate vector representation for each occurrence of a word, our proposed JTW sits in the middle that it learns different word vectors depending on which topic a word is associated with. Nevertheless, we can incorporate ELMo/BERT embeddings into JTW. This is achieved by replacing the BOW input with the pre-trained ELMo/BERT word embeddings in the Encoder-Decoder architecture of JTW, making the resulting word embeddings better at capturing semantic topics in a specific domain. More precisely, the training objective is switched to the cosine value of half the angle between the input ELMo/BERT vector and decoded output vector formulated as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 501, |
|
"end": 522, |
|
"text": "(Peters et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 616, |
|
"end": 637, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 857, |
|
"end": 878, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integration with Deep Contextualized Word Embeddings", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p \u03b8 (x n , w n |z (s) n ) \u221d cos ( 1 2 arccos ( x \u22a4 n \u2022 x (p) n x n x (p) n )) C c=1 cos ( 1 2 arccos ( w \u22a4 n,c \u2022 w (p) n,c w n,c w (p) n,c )),", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Integration with Deep Contextualized Word Embeddings", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integration with Deep Contextualized Word Embeddings", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "x (p) n and w (p)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integration with Deep Contextualized Word Embeddings", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "n,c are the reconstructed representations generated from z 7, respectively. Recall that the input to the model has been encoded by pre-trained word vectors (e.g., 300-dimensional vectors). Our training objective is to make the reconstructed x n,c as close as possible to their original input word embeddings. The difference is measured by the angle between the input and the output vectors. Normalized ELMo/BERT vectors can be transformed to the polar coordinate system with trigonometric functions, which forms a probability distribution by", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integration with Deep Contextualized Word Embeddings", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03c0 0 1 2 cos \u03b8 2 d\u03b8 = 1,", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Integration with Deep Contextualized Word Embeddings", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "and the function is monotone to the similarity between the input ELMo/BERT embeddings and the reconstructed output embeddings, which", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Integration with Deep Contextualized Word Embeddings", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "Recall Macro-F1 Micro-F1 JTW 0.5713\u00b1.021 0.5639\u00b1.014 0.5599\u00b1.016 0.7339\u00b1.015 ELMo 0.6091\u00b1.005 0.6053\u00b1.001 0.6056\u00b1.002 0.7610\u00b1.005 BERT 0.6293\u00b1.014 0.5952\u00b1.006 0.6041\u00b1.012 0.7626\u00b1.005 JTW-ELMo 0.6286\u00b1.008 0.6110\u00b1.004 0.6168\u00b1.008 0.7783\u00b1.004 JTW-BERT 0.6354\u00b1.014 0.6081\u00b1.009 0.6045\u00b1.014 0.7806\u00b1.005 Table 4 : Results on the 5-class sentiment classification by 10-fold cross validation on the Yelp reviews.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 297, |
|
"end": 304, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Criteria Precision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "reaches its peak when x n = x (p) n (i.e., \u03b8 = 0). Therefore, we are able to replace Equation 8with Equation 11when an ELMo/BERT is attached. The input vectors of the Encoder are then the embeddings produced by ELMo/BERT, and the Decoder output are the reconstructed word embeddings aligned with the input.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Criteria Precision", |
|
"sec_num": null |
|
}, |
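
{

"text": "As a minimal sketch (not taken from the paper's implementation) of how this reconstruction objective can be evaluated, the Python snippet below computes the cosine of half the angle between an input embedding and its reconstruction and multiplies the scores of the pivot word and its C context words, as in Equation 11; the function names and array shapes are assumptions made for illustration. Each per-vector factor lies in [0, 1] and reaches 1 when the reconstruction is perfectly aligned with the input (\u03b8 = 0), and Equation 12 shows that the corresponding density (1/2)cos(\u03b8/2) integrates to 1 over [0, \u03c0].\n\nimport numpy as np\n\ndef half_angle_similarity(v, v_rec):\n    # cosine of half the angle between an input vector v and its reconstruction v_rec\n    cos_theta = np.dot(v, v_rec) / (np.linalg.norm(v) * np.linalg.norm(v_rec))\n    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))\n    return np.cos(0.5 * theta)\n\ndef reconstruction_score(x, x_rec, contexts, contexts_rec):\n    # unnormalized score in the style of Equation 11: the pivot word times its C context words\n    score = half_angle_similarity(x, x_rec)\n    for w, w_rec in zip(contexts, contexts_rec):\n        score *= half_angle_similarity(w, w_rec)\n    return score",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Integration with Deep Contextualized Word Embeddings",

"sec_num": "5.6"

},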
|
{ |
|
"text": "We resort to the sentiment classification task on Yelp and compare the performance of JTW, ELMo, and BERT, 8 and the integration of both, JTW-ELMo and JTW-BERT, by 10-fold cross validation. In all the experiments, we fine-tune the models on the training set consisting of 90% of documents sampled from the dataset described in Section 4 and evaluate on the 10% of data that serves as the test set. We employ the further pre-training scheme (Sun et al., 2019 ) that different learning rates are applied to each layer and slanted triangular learning rates are imposed across epochs when adapting the language model to the training corpus (Howard and Ruder, 2018) . The classifier used for all the methods is an attention hop over a BiLSTM with a softmax layer. The ground truth labels are the five-scale review ratings included in the original dataset. The 5class sentiment classification results in precision, recall, macro-F1, and micro-F1 scores are reported in Table 4 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 440, |
|
"end": 457, |
|
"text": "(Sun et al., 2019", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 636, |
|
"end": 660, |
|
"text": "(Howard and Ruder, 2018)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 963, |
|
"end": 970, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Criteria Precision", |
|
"sec_num": null |
|
}, |
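
{

"text": "For concreteness, the sketch below (not taken from the paper's implementation) shows the slanted triangular schedule of Howard and Ruder (2018) referred to above: the learning rate increases linearly during a short warm-up phase and then decays linearly for the rest of training. The hyperparameter values (cut_frac, ratio, peak learning rate) are illustrative defaults rather than the settings used in our experiments.\n\ndef slanted_triangular_lr(t, total_steps, lr_max=0.01, cut_frac=0.1, ratio=32):\n    # t: current training step (0-indexed); total_steps: total number of steps\n    cut = max(1, int(total_steps * cut_frac))  # end of the warm-up phase\n    if t < cut:\n        p = t / cut  # linear increase\n    else:\n        p = 1 - (t - cut) / (cut * (1 / cut_frac - 1))  # linear decay\n    return lr_max * (1 + p * (ratio - 1)) / ratio\n\nLayer-wise (discriminative) learning rates can then be obtained by scaling this value with a per-layer factor, typically smaller for layers further from the output.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Integration with Deep Contextualized Word Embeddings",

"sec_num": "5.6"

},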
|
{ |
|
"text": "It can be observed from Table 4 that a sentiment classifier trained on JTW-produced word embeddings gives worse results compared with that using the deep contextualized word embeddings generated by ELMo or BERT. Nevertheless, when integrating the ELMo or BERT front-end with JTW, the combined model, JTW-ELMo and JTW-BERT, outperforms the original deep contextualized word representation models, respectively. It has been verified by the paired t-test that JTW-ELMo outperforms ELMo and BERT at the 95% significance level on Micro-F1. The results show that our proposed JTW is flexible and it can be easily integrated with pre-trained contextualized word embeddings to capture the domain-specific semantics better compared to directly fine-tuning the pre-trained ELMo or BERT on the target domain, hence leading to improved sentiment classification performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 31, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Criteria Precision", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Driven by the motivation that combining word embedding learning and topic modeling can mutually benefit each other, we propose a probabilistic generative framework that can jointly discover more semantically coherent latent topics from the global context and also learn topic-specific word embeddings, which naturally address the problem of word polysemy. Experimental results verify the effectiveness of the model on word similarity evaluation and word sense disambiguation. Furthermore, the model can discover latent topics shared across documents, and the encoder of JTW can generate the topical distribution for each word. This enables an intuitive understanding of word semantics. We have also shown that our proposed JTW can be easily integrated with deep contextualized word embeddings to further improve the performance of downstream tasks. In future work, we will explore the discourse relationships between context windows to model, for example, the semantic shift between the neighboring sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Joint Topic Word-embedding (JTW) ModelIn this section, we describe our proposed Joint Topic Word-embedding (JTW) model built on VAE, as shown inFigure 1. We first give an overview of JTW, then present each component of the model, followed by the training details.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.yelp.com/dataset/ documentation/main.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://mallet.cs.umass.edu/. 4 https://github.com/ixlan/BSG.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://code.google.com/archive/p/ word2vec/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/dice-group/ Palmetto.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/google-research/ bert.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The authors would like to thank the anonymous reviewers for insightful comments and helpful suggestions. This work was funded in part by EPSRC (grant no. EP/T017112/1). LZ was funded by the Chancellor's International Scholarship at 12 the University of Warwick. DZ was partially funded by the National Key Research and Development Program of China (2017YFB1002801) and the National Natural Science Foundation of China (61772132).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Bayesian neural word embedding", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Barkan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 31st AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3135--3143", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Barkan. 2017. Bayesian neural word embed- ding. In Proceedings of the 31st AAAI Confer- ence on Artificial Intelligence, pages 3135-3143.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Multi-level variational autoencoder: Learning disentangled representations from grouped observations", |
|
"authors": [ |
|
{ |
|
"first": "Diane", |
|
"middle": [], |
|
"last": "Bouchacourt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryota", |
|
"middle": [], |
|
"last": "Tomioka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Nowozin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 32nd AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2095--2102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diane Bouchacourt, Ryota Tomioka, and Sebastian Nowozin. 2018. Multi-level variational autoen- coder: Learning disentangled representations from grouped observations. In Proceedings of the 32nd AAAI Conference on Artificial Intelligence, pages 2095-2102.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Embedding words as distributions with a bayesian skip-gram model", |
|
"authors": [ |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Bra\u017einskas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serhii", |
|
"middle": [], |
|
"last": "Havrylov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of COLING 2018, the 27th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1775--1787", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arthur Bra\u017einskas, Serhii Havrylov, and Ivan Titov. 2018. Embedding words as distributions with a bayesian skip-gram model. In Proceed- ings of COLING 2018, the 27th International Conference on Computational Linguistics: Technical Papers, pages 1775-1787.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Cross-topic distributional semantic representations via unsupervised mappings", |
|
"authors": [ |
|
{ |
|
"first": "Eleftheria", |
|
"middle": [], |
|
"last": "Briakou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikos", |
|
"middle": [], |
|
"last": "Athanasiou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandros", |
|
"middle": [], |
|
"last": "Potamianos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1052--1061", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eleftheria Briakou, Nikos Athanasiou, and Alexandros Potamianos. 2019. Cross-topic distributional semantic representations via un- supervised mappings. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Lingui- stics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1052-1061, Minneapolis, Minnesota. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Gaussian LDA for topic models with word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Rajarshi", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manzil", |
|
"middle": [], |
|
"last": "Zaheer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "795--804", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rajarshi Das, Manzil Zaheer, and Chris Dyer. 2015. Gaussian LDA for topic models with word embeddings. In Proceedings of the 53rd Annual Meeting of the Association for Compu- tational Linguistics and the 7th International Joint Conference on Natural Language Pro- cessing, pages 795-804.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BERT: Pretraining of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre- training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Placing search in context: The concept revisited", |
|
"authors": [ |
|
{ |
|
"first": "Lev", |
|
"middle": [], |
|
"last": "Finkelstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yossi", |
|
"middle": [], |
|
"last": "Matias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Rivlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zach", |
|
"middle": [], |
|
"last": "Solan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gadi", |
|
"middle": [], |
|
"last": "Wolfman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eytan", |
|
"middle": [], |
|
"last": "Ruppin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 10th international conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "406--414", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev Finkelstein, Evgeniy Gabrilovich, Yossi Matias, Ehud Rivlin, Zach Solan, Gadi Wolfman, and Eytan Ruppin. 2001. Placing search in context: The concept revisited. In Proceedings of the 10th international conference on World Wide Web, pages 406-414.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Mixed membership word embeddings for computational social science", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Foulds", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "86--95", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James R. Foulds. 2018. Mixed membership word embeddings for computational social science. In Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics, pages 86-95.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Universal language model fine-tuning for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "328--339", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 328-339.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "LSTMEmbed: Learning word and sense representations from a large semantically annotated corpus with long short-term memories", |
|
"authors": [ |
|
{ |
|
"first": "Ignacio", |
|
"middle": [], |
|
"last": "Iacobacci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1685--1695", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ignacio Iacobacci and Roberto Navigli. 2019. LSTMEmbed: Learning word and sense repre- sentations from a large semantically annotated corpus with long short-term memories. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1685-1695, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Word and document embedding with vMFmixture priors on context word vectors", |
|
"authors": [ |
|
{ |
|
"first": "Shoaib", |
|
"middle": [], |
|
"last": "Jameel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Schockaert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3319--3328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shoaib Jameel and Steven Schockaert. 2019. Word and document embedding with vMF- mixture priors on context word vectors. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3319-3328, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "An introduction to variational methods for graphical models", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zoubin", |
|
"middle": [], |
|
"last": "Ghahramani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Saul", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Machine Learning", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "183--233", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael I. Jordan, Zoubin Ghahramani, Tommi S. Jaakkola, and Lawrence K. Saul. 1999. An introduction to variational methods for graphi- cal models. Machine Learning, 37(2):183-233.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Auto-encoding variational bayes", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Max Welling. 2014. Auto-encoding variational bayes. stat, 1050:1.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Enhancing topic modeling for short texts with auxiliary word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Chenliang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haoran", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiqian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aixin", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zongyang", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ACM Transactions on Information Systems (TOIS)", |
|
"volume": "36", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenliang Li, Yu Duan, Haoran Wang, Zhiqian Zhang, Aixin Sun, and Zongyang Ma. 2017. Enhancing topic modeling for short texts with auxiliary word embeddings. ACM Transactions on Information Systems (TOIS), 36(2):11.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Tat-Seng Chua, and Maosong Sun", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 29th AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2418--2424", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu, Zhiyuan Liu, Tat-Seng Chua, and Maosong Sun. 2015. Topical word embeddings. In Proceedings of the 29th AAAI Conference on Artificial Intelligence, pages 2418-2424.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Semeval-2007 task 10: English lexical substitution task", |
|
"authors": [ |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Mccarthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 4th International Workshop on Semantic Evaluations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "48--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diana McCarthy and Roberto Navigli. 2007. Semeval-2007 task 10: English lexical sub- stitution task. In Proceedings of the 4th International Workshop on Semantic Evalua- tions, pages 48-53. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A simple word embedding model for lexical substitution", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Melamud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 1st Workshop on Vector Space Modeling for Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Melamud, Omer Levy, and Ido Dagan. 2015. A simple word embedding model for lexical substitution. In Proceedings of the 1st Workshop on Vector Space Modeling for Na- tural Language Processing, pages 1-7, Denver, Colorado. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Neural variational inference for text processing", |
|
"authors": [ |
|
{ |
|
"first": "Yishu", |
|
"middle": [], |
|
"last": "Miao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International Conference on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1727--1736", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yishu Miao, Lei Yu, and Phil Blunsom. 2016. Neural variational inference for text processing. In International Conference on Machine Learn- ing (ICML), pages 1727-1736.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013a. Efficient estimation of word representations in vector space. CoRR 2013.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 26th International Conference on Neural Information Processing Systems, NIPS'13", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013b. Distributed representations of words and phrases and their compositionality. In Proceedings of the 26th International Conference on Neural Information Processing Systems, NIPS'13, pages 3111-3119, USA. Curran Associates Inc.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Babelnet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simone", |
|
"middle": [ |
|
"Paolo" |
|
], |
|
"last": "Ponzetto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Artificial Intelligence", |
|
"volume": "193", |
|
"issue": "", |
|
"pages": "217--250", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli and Simone Paolo Ponzetto. 2012. Babelnet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network. Artificial Intel- ligence, 193:217-250.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Efficient non-parametric estimation of multiple embeddings per word in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeevan", |
|
"middle": [], |
|
"last": "Shankar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1059--1069", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arvind Neelakantan, Jeevan Shankar, Alexandre Passos, and Andrew McCallum. 2014. Efficient non-parametric estimation of multiple embed- dings per word in vector space. In Proceedings of the 2014 Conference on Empirical Meth- ods in Natural Language Processing (EMNLP), pages 1059-1069.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Improving topic models with latent feature word representations", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Dat Quoc Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lan", |
|
"middle": [], |
|
"last": "Billingsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "299--313", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Quoc Nguyen, Richard Billingsley, Lan Du, and Mark Johnson. 2015. Improving topic models with latent feature word representations. Transactions of ACL, pages 299-313.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "GloVe: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextu- alized word representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Tech- nologies, Volume 1 (Long Papers), volume 1, pages 2227-2237.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "De-conflated semantic representations", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Taher Pilehvar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nigel", |
|
"middle": [], |
|
"last": "Collier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1680--1690", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Taher Pilehvar and Nigel Collier. 2016. De-conflated semantic representations. In Proceedings of the 2016 Conference on Empir- ical Methods in Natural Language Proces- sing (EMNLP), pages 1680-1690.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Deep generative model for joint alignment and word representation", |
|
"authors": [ |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Rios", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wilker", |
|
"middle": [], |
|
"last": "Aziz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Sima'an", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1011--1023", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miguel Rios, Wilker Aziz, and Khalil Sima'an. 2018. Deep generative model for joint align- ment and word representation. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1011-1023.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Exploring the space of topic coherence measures", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "R\u00f6der", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Both", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Hinneburg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Eighth International Conference on Web Search and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael R\u00f6der, Andreas Both, and Alexander Hinneburg. 2015. Exploring the space of topic coherence measures. In Proceedings of the Eighth International Conference on Web Search and Data Mining, Shanghai, February 2-6.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Evaluating topic coherence measures", |
|
"authors": [ |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Rosner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Hinneburg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "R\u00f6der", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Nettling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Both", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frank Rosner, Alexander Hinneburg, Michael R\u00f6der, Martin Nettling, and Andreas Both. 2014. Evaluating topic coherence measures. CoRR, abs/1403.6397.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Jointly learning word embeddings and latent topics", |
|
"authors": [ |
|
{ |
|
"first": "Bei", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shoaib", |
|
"middle": [], |
|
"last": "Jameel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Schockaert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kwun Ping", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "375--384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bei Shi, Wai Lam, Shoaib Jameel, Steven Schockaert, and Kwun Ping Lai. 2017. Jointly learning word embeddings and latent topics. In Proceedings of the 40th International ACM SIGIR Conference on Research and Develop- ment in Information Retrieval, pages 375-384. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Autoencoding variational inference for topic 14 models", |
|
"authors": [ |
|
{ |
|
"first": "Akash", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Sutton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 5th International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akash Srivastava and Charles Sutton. 2017. Autoencoding variational inference for topic 14 models. In Proceedings of the 5th International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "How to fine-tune BERT for text classification?", |
|
"authors": [ |
|
{ |
|
"first": "Chi", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xipeng", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yige", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuanjing", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "China National Conference on Chinese Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "194--206", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chi Sun, Xipeng Qiu, Yige Xu, and Xuanjing Huang. 2019. How to fine-tune BERT for text classification? In China National Conference on Chinese Computational Linguistics, pages 194-206. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Word meaning in context: A simple and effective vector model", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Thater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hagen", |
|
"middle": [], |
|
"last": "F\u00fcrstenau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of 5th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1134--1143", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Thater, Hagen F\u00fcrstenau, and Manfred Pinkal. 2011. Word meaning in context: A simple and effective vector model. In Proceedings of 5th International Joint Conference on Natural Language Processing, pages 1134-1143.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Online reviews as health data: Examining the association between availability of health care services and patient star ratings exemplified by the Yelp academic dataset", |
|
"authors": [ |
|
{ |
|
"first": "Nam", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joon", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "JMIR Public Health and Surveillance", |
|
"volume": "3", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nam N. Tran and Joon Lee. 2017. Online reviews as health data: Examining the association between availability of health care services and patient star ratings exemplified by the Yelp academic dataset. JMIR Public Health and Surveillance, 3(3).", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Word representations via gaussian embedding", |
|
"authors": [ |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Vilnis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 3rd International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luke Vilnis and Andrew McCallum. 2015. Word representations via gaussian embedding. In Proceedings of the 3rd International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Distilled Wasserstein learning for word embedding and topic modeling", |
|
"authors": [ |
|
{ |
|
"first": "Hongteng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenlin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Carin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems 31", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1722--1731", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hongteng Xu, Wenlin Wang, Wei Liu, and Lawrence Carin. 2018. Distilled Wasserstein learning for word embedding and topic model- ing. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems 31, pages 1722-1731. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Ku: Word sense disambiguation by substitution", |
|
"authors": [ |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 4th International Workshop on Semantic Evaluations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "207--213", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deniz Yuret. 2007. Ku: Word sense disambi- guation by substitution. In Proceedings of the 4th International Workshop on Semantic Evaluations, pages 207-213. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Inter and intra topic structure learning with word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "He", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lan", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wray", |
|
"middle": [], |
|
"last": "Buntine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingyuan", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5887--5896", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "He Zhao, Lan Du, Wray Buntine, and Mingyuan Zhou. 2018. Inter and intra topic structure learning with word embeddings. In International Conference on Machine Learning (ICML), pages 5887-5896.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "The Variational Auto-Encoder framework for the Joint Topic Word-embedding (JTW) model. Boxes are ''plates'' indicating replicates. Shaded circles represent the observed variables. \u03b2 is a T \u00d7 V matrix representing corpus-wide latent topics.", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Topic coherence scores versus number of topics.", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"num": null, |
|
"text": "Training of JTW model Input: pivot words x 1:N , context windows w 1:N , learning rate \u03b7, learning rate decay lrDecay, maximum iterative number maxIter, batch size B, batch number N B ; Output: learned network parameters \u03b8, \u03c6; 1 Initialize \u03b8, \u03c6 randomly; 2 i \u2190 0, \u03b7 \u2190 0.0005; 3 For convenience, define x B = x n:n+B , w B = w n:n+B as a minibatch; 4 while \u03b8, \u03c6 not converged and i < maxIter do 5", |
|
"type_str": "table", |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"num": null, |
|
"text": "Example topics discovered by JTW and MMSG, each topic is represented by the top 10 words sorted by their likelihoods. The topic labels are assigned manually. Semantically less coherent words are highlighted by italics.", |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |