|
{ |
|
"paper_id": "K17-1016", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:08:09.686696Z" |
|
}, |
|
"title": "Learning Word Representations with Regularization from Prior Knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chia-Jung", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Conventional word embeddings are trained with specific criteria (e.g., based on language modeling or co-occurrence) inside a single information source, disregarding the opportunity for further calibration using external knowledge. This paper presents a unified framework that leverages pre-learned or external priors, in the form of a regularizer, for enhancing conventional language model-based embedding learning. We consider two types of regularizers. The first type is derived from topic distribution by running latent Dirichlet allocation on unlabeled data. The second type is based on dictionaries that are created with human annotation efforts. To effectively learn with the regularizers, we propose a novel data structure, trajectory softmax, in this paper. The resulting embeddings are evaluated by word similarity and sentiment classification. Experimental results show that our learning framework with regularization from prior knowledge improves embedding quality across multiple datasets, compared to a diverse collection of baseline methods.", |
|
"pdf_parse": { |
|
"paper_id": "K17-1016", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Conventional word embeddings are trained with specific criteria (e.g., based on language modeling or co-occurrence) inside a single information source, disregarding the opportunity for further calibration using external knowledge. This paper presents a unified framework that leverages pre-learned or external priors, in the form of a regularizer, for enhancing conventional language model-based embedding learning. We consider two types of regularizers. The first type is derived from topic distribution by running latent Dirichlet allocation on unlabeled data. The second type is based on dictionaries that are created with human annotation efforts. To effectively learn with the regularizers, we propose a novel data structure, trajectory softmax, in this paper. The resulting embeddings are evaluated by word similarity and sentiment classification. Experimental results show that our learning framework with regularization from prior knowledge improves embedding quality across multiple datasets, compared to a diverse collection of baseline methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Distributed representation of words (or word embedding) has been demonstrated to be effective in many natural language processing (NLP) tasks (Bengio et al., 2003; Collobert and Weston, 2008; Turney and Pantel, 2010; Collobert et al., 2011; Mikolov et al., 2013b,d; Weston et al., 2015) . Conventional word embeddings are trained with a single objective function (e.g., language modeling (Mikolov et al., 2013c) or word co-occurrence factorization (Pennington et al., 2014) ), which restricts the capability of the learned embeddings from integrating other types of knowledge. Prior work has leveraged relevant sources to obtain embeddings that are best suited for the target tasks, such as Maas et al. (2011) using a sentiment lexicon to enhance embeddings for sentiment classification. However, learning word embeddings with a particular target makes the approach less generic, also implying that customized adaptation has to be made whenever a new knowledge source is considered.", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 163, |
|
"text": "(Bengio et al., 2003;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 191, |
|
"text": "Collobert and Weston, 2008;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 192, |
|
"end": 216, |
|
"text": "Turney and Pantel, 2010;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 240, |
|
"text": "Collobert et al., 2011;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 265, |
|
"text": "Mikolov et al., 2013b,d;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 266, |
|
"end": 286, |
|
"text": "Weston et al., 2015)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 388, |
|
"end": 411, |
|
"text": "(Mikolov et al., 2013c)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 448, |
|
"end": 473, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 691, |
|
"end": 709, |
|
"text": "Maas et al. (2011)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Along the lines of improving embedding quality, semantic resources have been incorporated as guiding knowledge to refine objective functions in a joint learning framework Xu et al., 2014; Yu and Dredze, 2014; Nguyen et al., 2016) , or used for retrofitting based on word relations defined in the semantic lexicons (Faruqui et al., 2015; Kiela et al., 2015) . These approaches, nonetheless, require explicit word relations defined in semantic resources, which is a difficult prerequisite for knowledge preparation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 187, |
|
"text": "Xu et al., 2014;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 208, |
|
"text": "Yu and Dredze, 2014;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 229, |
|
"text": "Nguyen et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 336, |
|
"text": "(Faruqui et al., 2015;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 356, |
|
"text": "Kiela et al., 2015)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Given the above challenges, we propose a novel framework that extends typical context learning by integrating external knowledge sources for enhancing embedding learning. Compared to a well known work by Faruqui et al. (2015) that focused on tackling the task using a retrofitting 1 framework on semantic lexicons, our method has an emphasis on joint learning where two objectives are considered for optimization simultaneously. In the meantime, we design a general-purpose infrastructure which can incorporate arbitrary external sources into learning as long as the sources can be encoded into vectors of numerical values (e.g. multi-hot vector according to the topic distributions from a topic model). In prior work by Yu and Dredze (2014) and Kiela et al. (2015) , the ex-ternal knowledge has to be clustered beforehand according to their semantic relatedness (e.g., cold, icy, winter, frozen), and words of similar meanings are added as part of context for learning. This may set a high bar for preparing external knowledge since finding the precise word-word relations is required. Our infrastructure, on the other hand, is more flexible as knowledge that is learned elsewhere, such as from topic modeling or even a sentiment lexicon, can be easily encoded and incorporated into the framework to enrich embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 225, |
|
"text": "Faruqui et al. (2015)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 721, |
|
"end": 741, |
|
"text": "Yu and Dredze (2014)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 746, |
|
"end": 765, |
|
"text": "Kiela et al. (2015)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The way we integrate external knowledge is performed by the notion of a regularizer, which is an independent component that can be connected to the two typical architectures, namely, continuous bag-of-words (CBOW) and skip-gram (SG), or used independently as a retrofitter. We construct the regularizers based on the knowledge learned from both unlabeled data and manually crafted information sources. As an example of the former, a topic model from latent Dirichlet allocation (LDA) (Blei et al., 2003) is first generated from a given corpus, based on which per-word topical distributions are then added as extra signals to aid embedding learning. As an example of the latter, one can encode a dictionary into the regularizer and thus adapt the learning process with the encoded knowledge.", |
|
"cite_spans": [ |
|
{ |
|
"start": 484, |
|
"end": 503, |
|
"text": "(Blei et al., 2003)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Another contribution of this paper is that we propose a novel data structure, trajectory softmax, to effectively learn prior knowledge in the regularizer. Compared to conventional tree based hierarchical softmax, trajectory softmax can greatly reduce the space complexity when learning over a high-dimension vector. Our experimental results on several different tasks have demonstrated the effectiveness of our approach compared to up-todate studies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows. In section 2, we describe in detail our framework and show how we learn the regularizer in section 3. Section 4 presents and analyzes our experimental results and section 5 surveys related work. Finally, conclusions and directions of future work are discussed in section 6.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Conventionally word embeddings are learned from word contexts. In this section, we describe our method of extending embedding learning to incorporate other types of information sources.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Previous work has shown that many different sources can help learn better embeddings, such as semantic lexicons (Yu and Dredze, 2014; Faruqui et al., 2015; Kiela et al., 2015) or topic distributions (Maas et al., 2011; Liu et al., 2015b) . To provide a more generic solution, we propose a unified framework that learns word embeddings from context (e.g., CBOW or SG) together with the flexibility of incorporating arbitrary external knowledge using the notion of a regularizer. Details are unfolded in following subsections.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 133, |
|
"text": "(Yu and Dredze, 2014;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 155, |
|
"text": "Faruqui et al., 2015;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 175, |
|
"text": "Kiela et al., 2015)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 199, |
|
"end": 218, |
|
"text": "(Maas et al., 2011;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 237, |
|
"text": "Liu et al., 2015b)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Preliminaries: The fundamental principle for learning word embeddings is to leverage word context, with a general goal of maximizing the likelihood that a word is predicted by its context. For example, the CBOW model can be formulated as maximizing", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "L = 1 |V | |V | i=1 log p(w i | 0<|j|\u2264c \u03c5 i+j ), \u2200 w i \u2208 V", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "(1) where \u03c5 i+j refers to the embedding of a word in w i+c i\u2212c , and c defines the window size of words adjacent to the word w i . The optimization for L over the entire corpus is straightforward.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
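{
"text": "As a rough illustration of Eq. (1), the following toy sketch (our own, not the released implementation) sums the context embeddings within a +/- c window and scores the current word with a plain softmax standing in for the hierarchical softmax discussed in Section 2.3; the vocabulary size, dimensions and random values are illustrative assumptions.\n\nimport numpy as np\n\nrng = np.random.default_rng(0)\nV, dim, c = 10, 8, 2  # toy vocabulary size, embedding size, context window (illustrative)\nemb = rng.normal(scale=0.1, size=(V, dim))  # input (context) embeddings\nout = rng.normal(scale=0.1, size=(V, dim))  # output weights used for prediction\n\ndef cbow_log_prob(words, i):\n    # sum the embeddings of the words inside the +/- c window around position i\n    ctx = [words[i + j] for j in range(-c, c + 1) if j != 0 and 0 <= i + j < len(words)]\n    h = emb[ctx].sum(axis=0)\n    scores = out @ h  # score every word in the vocabulary\n    log_p = scores - np.log(np.exp(scores).sum())  # log softmax\n    return log_p[words[i]]  # log p(w_i | sum of context embeddings)\n\nsentence = [1, 4, 2, 7, 3]\nprint(cbow_log_prob(sentence, 2))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Proposed Learning Framework",
"sec_num": "2.1"
},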
|
{ |
|
"text": "The left part of Figure 1 illustrates the concept of such context learning. It is a typical objective function for language modeling, where w i is learned by the association with its neighboring words. Since context greatly affects the choice of the current word, this modeling strategy can help finding reasonable semantic relationships among words.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 25, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Regularizer: To incorporate additional sources for embedding learning, we introduce the notion of a regularizer, which is designed to encode information from arbitrary knowledge corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Given a knowledge resource \u03a8, one can encode the knowledge carried by a word w with \u03c8(w), where \u03c8 can be any function that maps w to the knowledge it encapsulates. For example, a word has a topic vector \u03c8", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "(w) = \u2212\u2212\u2192 e (w i ) \u03a6 [1:K,:] , resulting \u03c8(w) = \u2212 \u2192 \u03a6 w = (\u03c6 1,w , \u03c6 2,w , ..., \u03c6 K,w ), where \u03a6 [1:K,:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "] is the topic distribution matrix for all words with K topics; \u2212\u2212\u2192 e (w i ) is the standard basis vector with 1 at the i-th position in the vocabulary V . Therefore, regularization for all w with given Figure 1 : Illustration of joint learning word embeddings with context and regularization from prior knowledge. The green lines refer to the prediction and the red dotted lines refer to the updating process. a knowledge source can be conceptually used to maximize w\u2208V R(\u03c5), where R is the regularizer, defined as a function of the embedding \u03c5 of a given word w and formulated as:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 203, |
|
"end": 211, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "R(\u03c5) = log p(\u03c8(w)|\u03c5), \u2200 w \u2208 V, \u03a8", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The right part of Figure 1 shows an instantiation of a regularizer that encodes prior knowledge of vocabulary size |V |, each with D dimensions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 26, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Joint Learning: To extend conventional embedding learning, we combine context learning from an original corpus with external knowledge encoded by a regularizer, where the shared vocabulary set forms a bridge connecting the two spaces. In particular, the objective function for CBOW with integrating the regularizer can be formulated as maximizing", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "L = 1 |V | |V | i=1 log p(w i , \u03c8(w i ) | 0<|j|\u2264c \u03c5 i+j ) (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where not only w i , but also R(w i ) is predicted by the context words w i+j via their embeddings \u03c5 i+j . Figure 1 as a whole illustrates this idea. Recall that each row of the matrix corresponds to a vector of a word in V , representing prior knowledge across D dimensions (e.g., semantic types, classes or topics). When learning/predicting a word within this framework, the model needs to predict not only the correct word as shown in the context learning part in the figure, but also the correct vector in the regularizer. In doing so, the prior knowledge will be carried to word embed-dings from regularization to context learning by back-propagation through the gradients obtained from the learning process based on the regularization matrix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 115, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
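{
"text": "To make the joint objective in Eq. (3) concrete, here is a minimal sketch (our own illustration, not the authors' code) in which the regularizer term is modeled with independent sigmoid outputs as a stand-in for the trajectory softmax of Section 2.3; every name and value below is a hypothetical placeholder.\n\nimport numpy as np\n\ndef sigmoid(x):\n    return 1.0 / (1.0 + np.exp(-x))\n\ndef joint_log_likelihood(h, w_i, out, psi_w, reg_w):\n    # context term: log p(w_i | h), where h is the sum of the context embeddings\n    scores = out @ h\n    context_ll = scores[w_i] - np.log(np.exp(scores).sum())\n    # regularizer term: log p(psi(w_i) | h) under independent sigmoid outputs,\n    # a simplified stand-in for the trajectory softmax of Section 2.3\n    p = sigmoid(reg_w @ h)  # one probability per knowledge dimension\n    reg_ll = np.sum(psi_w * np.log(p) + (1 - psi_w) * np.log(1 - p))\n    return context_ll + reg_ll\n\nrng = np.random.default_rng(1)\nV, dim, D = 10, 8, 4\nout = rng.normal(scale=0.1, size=(V, dim))\nreg_w = rng.normal(scale=0.1, size=(D, dim))\nh = rng.normal(size=dim)\nprint(joint_log_likelihood(h, w_i=3, out=out, psi_w=np.array([0, 0, 1, 1]), reg_w=reg_w))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Proposed Learning Framework",
"sec_num": "2.1"
},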
|
{ |
|
"text": "Retrofitting: With joint learning as our goal, we should emphasize that the proposed framework supports simultaneous context learning and prior knowledge retrofitting with a unified objective function. This means that the retrofitters can be considered as a stand-alone component at disposal, where the external knowledge vectors are regarded as supervised-learning target and the embeddings are updated through the course of fitting to the target. In \u00a74, we will evaluate the performance of both joint learner and retrofitter in detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Proposed Learning Framework", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "As shown in Equation 3, prior knowledge participates in the optimization process for predicting the current word and contributes to embedding updating during training a CBOW model. Using stochastic gradient descent (SGD), embeddings can be easily updated by both objective functions for language modeling and regularization through:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Estimation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u03c5 i+j = \u03c5 i+j \u2212\u03bb\u2207 \u03c5 [log p(w i | 0<|j|\u2264c \u03c5 i+j )+R(\u03c5 i )] (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Estimation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where R is defined as in Eq.2 for \u03c8(w i ). For SG model, prior knowledge is introduced in a similar way, with the difference being that context words are predicted instead of the current word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Estimation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Therefore, when learned from the context, em- beddings are updated in the same way as in normal CBOW and SG models. When learned from the regularizer, embeddings are updated via a supervised learning over \u03a8, on the condition that \u03a8 is appropriately encoded by \u03c8. The details of how it is performed will be illustrated in the next subsection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Estimation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Hierarchical softmax is a good choice for reducing the computational complexity when training probabilistic neural network language models. Therefore, for context learning on the left part of Figure 1, we continue using hierarchical softmax based on Huffman coding tree (Mikolov et al., 2013a) . Typically to encode the entire vocabulary, the depth of the tree falls in a manageable range around 15 to 18. However, different from learning context words, to encode a regularizer as shown on the right part of Figure 1 , using hierarchical softmax is intractable due to exponential space demand. Consider words expressed with D-dimensional vectors in a regularizer, a tree-based hierarchical softmax may require 2 D \u2212 1 nodes, as illustrated in the left hand side of Figure 2 . Since each node contains a d-dimensional \"node vector\" that is to be updated through training, the total space required is O(2 D \u2022 d) for hierarchical softmax to encode the regularizer. When D is very large, such as D = 50 meaning that tree depth is 50, the space demand tends to be unrealistic as the number of nodes in the tree grows to 2 50 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 270, |
|
"end": 293, |
|
"text": "(Mikolov et al., 2013a)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 198, |
|
"text": "Figure", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 508, |
|
"end": 516, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 765, |
|
"end": 773, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "To avoid the exponential requirement in space, in this work, we propose a trajectory softmax activation to effectively learn over the D-dimensional vectors. Our approach follows a grid hierarchical structure along a path when conducting learning in the regularizer. From the right hand side of Figure 2, we see that the same regularizer entry is encoded with a path of D nodes, using a grid structure instead of a tree one. Consequently the total space required will be reduced to O(2", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 294, |
|
"end": 300, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u2022 D \u2022 d).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "As a running example, Figure 2 shows that when D = 4, the conventional hierarchical softmax needs at least 15 nodes to perform softmax over the path, while trajectory softmax greatly reduces space to only 7 nodes. Compared to treebased hierarchical softmax, the paths in trajectory softmax are not branches of a tree, but a fully connected grid of nodes with space complexity of D \u00d7 |C| in general. Here |C| refers to the number of choices on the paths for a node to the next node, and thus |C| = 2 is the binary case. In Figure 2 , we see an activation trajectory for a sequence of \"Root\u2192100\" for encoding word w 5 . w t is then learned and updated through the nodes on the trajectory when w 5 is predicted by w t . The learning and updating are referred by the dashed arrow lines. Overall, trajectory softmax greatly reduces the space complexity than hierarchical softmax, especially when words sharing similar information, in which case the paths of these words will be greatly overlapped.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 30, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 530, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
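{
"text": "As a quick back-of-the-envelope check of the two space bounds above (our own illustration; d = 200 is an assumed node-vector size, matching the embedding dimension used later in the experiments):\n\n# Node and parameter counts implied by the discussion above (illustrative check only).\nD, d, C = 50, 200, 2  # knowledge dimensions, node-vector size, choices per step\ntree_nodes = 2 ** D - 1  # complete binary tree over D-level codes\ngrid_nodes = D * C  # fully connected grid used by trajectory softmax\nprint(f'tree: {tree_nodes:.3e} nodes, {tree_nodes * d:.3e} node-vector parameters')\nprint(f'grid: {grid_nodes} nodes, {grid_nodes * d} node-vector parameters')",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Trajectory Softmax",
"sec_num": "2.3"
},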
|
{ |
|
"text": "More formally, learning with trajectory softmax in the binary case is similar to hierarchical softmax, which is to maximize p over the path for a vector encoded in \u03c8(w), where p is defined below with an input vector \u03c5:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p(\u03c8(w)|\u03c5) = D\u22121 i=1 \u03c3( n(i + 1) \u2022 \u03c5 i \u03c5)", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where \u03c5 i is the inner vector in i-th node on the trajectory. n(i + 1) = 1 or \u22121 when (i + 1)-th node is encoded with 0 or 1, respectively. The final update to word embedding \u03c5 with the regularizer is conducted by:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u03c5 * = \u03c5 \u2212 \u03b3(\u03c3(\u03c5 i \u03c5) \u2212 t i ) \u2022 \u03c5 i (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "which is applied to i = 1, 2, ..., D \u2212 1, where \u03c3(x) = exp(x)/(1 + exp(x)); t i = n(i + 1) ; \u03b3 is a discount learning rate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
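{
"text": "The binary trajectory softmax of Eqs. (5) and (6) can be sketched as follows (our own simplified reading, not the released implementation): D-1 node vectors on the path score the encoded bits, the path log-probability is a sum of log-sigmoids, and one gradient-ascent step updates both the node vectors and the input embedding; all sizes and values are illustrative.\n\nimport numpy as np\n\ndef sigmoid(x):\n    return 1.0 / (1.0 + np.exp(-x))\n\ndef trajectory_log_prob(code, nodes, v):\n    # Eq. (5): code[i] in {0, 1} selects the sign n(i+1); nodes holds the D-1 node vectors.\n    signs = np.where(np.asarray(code) == 0, 1.0, -1.0)\n    return np.sum(np.log(sigmoid(signs * (nodes @ v))))\n\ndef trajectory_update(code, nodes, v, gamma=0.1):\n    # one gradient-ascent step on Eq. (5) for the node vectors and the embedding v,\n    # in the spirit of the update in Eq. (6)\n    signs = np.where(np.asarray(code) == 0, 1.0, -1.0)\n    g = signs * (1.0 - sigmoid(signs * (nodes @ v)))  # derivative of log sigma w.r.t. the score\n    v_new = v + gamma * (g[:, None] * nodes).sum(axis=0)\n    nodes_new = nodes + gamma * g[:, None] * v\n    return nodes_new, v_new\n\nrng = np.random.default_rng(2)\nD, dim = 5, 8\nnodes = rng.normal(scale=0.1, size=(D - 1, dim))  # one node vector per step on the path\nv = rng.normal(size=dim)\ncode = [1, 0, 0, 1]  # the D-1 binary decisions encoding psi(w)\nprint(trajectory_log_prob(code, nodes, v))\nnodes, v = trajectory_update(code, nodes, v)\nprint(trajectory_log_prob(code, nodes, v))  # increases after the update",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Trajectory Softmax",
"sec_num": "2.3"
},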
|
{ |
|
"text": "Since the design of trajectory softmax is compatible with the conventional hierarchical softmax, one can easily implement the joint learning by concatenating its Root with the terminal node in the hierachical tree. The learning process is thus to traverse all the nodes from the hierarchical tree and the trajectory path.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Trajectory Softmax", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We consider two categories of information sources for constructing regularizers. The first type of regularizer is built based on resources without annotation. On the contrary, the second type uses text collections with annotation. For brevity, throughout the paper we refer to the former as unannotated regularizer whereas the latter is recognized as annotated regularizer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Constructing Regularizers", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The unannotated regularizer constructs its regularization matrix based on an LDA learned topic distribution, which reflects topical salience information of a given word from prior knowledge. Using LDA not only serves our purpose of learning according to word semantics reflected by cooccurrences but can also bring in knowledge inexpensively (i.e., no annotations needed).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unannotated Regularizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To start, a classic LDA is first performed on an arbitrary base corpus for retrieving word topical distribution, resulting in a topic model with K topics. All the units in the corpus are then assigned with a word-topic probability \u03c6 i corresponding to topic k, based on which a matrix is formed with all \u2212 \u2192 \u03a6 w , as described in \u00a72.1. Next we convert each \u2212 \u2192 \u03a6 into a 0-1 vector based on the maximum values in \u2212 \u2192 \u03a6 . In particular, positions with maximum values are set to 1 and the rest are set to 0 (e.g.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unannotated Regularizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "[0.1, 0.1, 0.4, 0.4] \u2192 [0, 0, 1, 1]). This converted matrix functions as the final regularization matrix as shown in right hand side of Figure 1 . We set K = 50 in our experiments. 2 An in-house LDA implementation 3 is used for training \u03a6 [1:K,:] , with 1,000 iterations.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 144, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Unannotated Regularizer", |
|
"sec_num": "3.1" |
|
}, |
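{
"text": "The conversion just described can be sketched as follows (our own illustration; the in-house LDA itself is not shown): every position holding the row maximum of the word-topic matrix is set to 1 and the rest to 0.\n\nimport numpy as np\n\ndef binarize_topics(phi):\n    # phi: |V| x K matrix of word-topic probabilities; positions attaining the\n    # per-word maximum become 1 and all others 0 (ties all become 1).\n    row_max = phi.max(axis=1, keepdims=True)\n    return (phi == row_max).astype(int)\n\nphi = np.array([[0.1, 0.1, 0.4, 0.4],\n                [0.7, 0.1, 0.1, 0.1]])\nprint(binarize_topics(phi))  # [[0 0 1 1], [1 0 0 0]]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Unannotated Regularizer",
"sec_num": "3.1"
},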
|
{ |
|
"text": "We use three sources for training annotated regularizers in this work. Two of the sources are semantic lexicons, namely, the Paraphrase Database (PPDB) 4 (Ganitkevitch et al., 2013) and synonyms in the WordNet (WN syn ) 5 (Miller, 1995) . They are used in the word similarity task. The third source is a semantic dictionary, SentiWordNet 3.0 (SWN) (Baccianella et al., 2010) , which is used in the sentiment classification task. All of the three sources were created with annotation efforts, where either lexical or semantic relations were provided by human experts beforehand.", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 181, |
|
"text": "(Ganitkevitch et al., 2013)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 236, |
|
"text": "(Miller, 1995)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 374, |
|
"text": "(Baccianella et al., 2010)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotated Regularizer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Before constructing the regularizer, we need encode each word in the sources as a vector according to its relations to other words or predefined information. For PPDB and WN syn , we use them in different ways for joint learning and retrofitting. In order to optimize the efficiency in joint learning, we compress the word relations with topic representations. We use an LDA learner to get topic models for the lexicons 6 , with K = 50. Therefore, the word relations are transferred into topic distributions that are learned from their cooccurrences defined in the lexicon. The way we construct regularization matrix may be lossy, risking losing information that is explicitly delivered in the lexicon. However, it provides us effective encodings for words, and also yields better learning performance empirically in our experiments. In retrofitting, we directly use words' adjacent matrices extracted from their relations defined in the lexicons, then take the adjacent vector for each word as the regularization vector.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotated Regularizer", |
|
"sec_num": "3.2" |
|
}, |
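{
"text": "For the retrofitting setting, the adjacency-based regularization vectors can be sketched as follows (our own toy example; the actual PPDB and WN_syn processing is not shown).\n\nimport numpy as np\n\ndef adjacency_vectors(lexicon, vocab):\n    # lexicon maps a word to the set of words it is related to; each row of the\n    # returned |V| x |V| matrix is that word's regularization vector.\n    idx = {w: i for i, w in enumerate(vocab)}\n    A = np.zeros((len(vocab), len(vocab)), dtype=int)\n    for w, related in lexicon.items():\n        for r in related:\n            if w in idx and r in idx:\n                A[idx[w], idx[r]] = 1\n    return A\n\nvocab = ['cold', 'icy', 'frozen', 'warm']\nlexicon = {'cold': {'icy', 'frozen'}, 'icy': {'cold'}}\nprint(adjacency_vectors(lexicon, vocab))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Annotated Regularizer",
"sec_num": "3.2"
},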
|
{ |
|
"text": "The SWN includes 83K words (147K words and phrases in total). Every word in SWN has two scores for its degree towards positive and negative polarities. For example, the word \"pretty\" receives 0.625 and 0 for positive and negative respectively, which means it is strongly associated with positive sentiment. The scores range from 0 to 1 with step Table 1 : Word similarity results for joint learning on three datasets in terms of Pearson's coefficient correlation (\u03b3) and Spearman's rank correlation (\u03c1) in percentages. Higher score indicates better correlation of the model with respect to the gold standard. Bold indicates the highest score for each embedding type. of 0.125 for both positive and negative polarities. Therefore there are 9 different degrees for a word to be annotated for the two sentiments. For encoding this dictionary, we design a 18-dimension vector, in which the first 9 dimension represents the positive sentiment while the last 9 for negative sentiment. A word is thus encoded into a binary form where the corresponding dimension is set to 1 with others 0. For the aforementioned word \"pretty\", its encoded vector will be \"000001000 000000000\", in which the score 0.625 of positive activates the 6th dimension in the vector. In doing so, we form a 83K \u00d7 18 regularization matrix for the SWN dictionary.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 346, |
|
"end": 353, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotated Regularizer", |
|
"sec_num": "3.2" |
|
}, |
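{
"text": "The 18-dimensional SWN encoding can be sketched as follows (our own reading of the description and of the 'pretty' example; the exact index convention is an assumption on our part).\n\nimport numpy as np\n\ndef encode_swn(pos, neg, step=0.125):\n    # first 9 positions hold the positive degree, last 9 the negative degree;\n    # a score s > 0 activates position round(s / step) + 1 in its half, and a zero\n    # score leaves that half empty, reproducing the '000001000 000000000' example.\n    vec = np.zeros(18, dtype=int)\n    if pos > 0:\n        vec[round(pos / step)] = 1  # 0-based index 5 is the 6th dimension for s = 0.625\n    if neg > 0:\n        vec[9 + round(neg / step)] = 1\n    return vec\n\nprint(''.join(map(str, encode_swn(0.625, 0.0))))  # 000001000000000000",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Annotated Regularizer",
"sec_num": "3.2"
},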
|
{ |
|
"text": "The resulting word embeddings based on joint learning as well as retrofitting are evaluated intrinsically and extrinsically. For intrinsic evaluation, we use word similarity benchmark to directly test the quality of the learned embeddings. For extrinsic evaluation, we use sentiment analysis as a downstream task with different input embeddings. Regularizers based on LDA, PPDB and WN syn are used in word similarity experiment, while SentiWordNet regularization is used in sentiment analysis. The experimental results will be discussed in \u00a74.1 and \u00a74.2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We experiment with three learning paradigms, namely CBOW, SG and GloVe. GloVe is only tested in retrofitting since our regularizer is not compatible with GloVe learning objective in joint learning. In all of our retrofitting experiments, we only train the regularizer with one iteration, consistent with Kiela et al. (2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 304, |
|
"end": 323, |
|
"text": "Kiela et al. (2015)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The base corpus that we used to train initial word embeddings is from the latest articles dumped from Wikipedia and newswire 7 , which contains approximately 8 billion words. When training on this corpus, we set the dimension of word embeddings to be 200 and cutoff threshold of word frequency threshold to be 5 times of occurrence. These are common setups shared across the following experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We use the MEN-3k (Bruni et al., 2012) , SimLex-999 and WordSim-353 (Finkelstein et al., 2002) datasets to perform quantitative comparisons among different approaches to generating embeddings. The cosine scores are computed between the vectors of each pair of words in the datasets 8 . The measures adopted are Pearson's coefficient of product-moment correlation (\u03b3) and Spearman's rank correlation (\u03c1), which reflect how Table 2 : Word similarity results for retrofitting on three datasets in terms of Pearson's coefficient correlation (\u03b3) and Spearman's rank correlation (\u03c1) in percentages. Higher score indicates better correlation of the model with respect to the gold standard. Bold indicates the highest score for each embedding type.", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 38, |
|
"text": "(Bruni et al., 2012)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 56, |
|
"end": 94, |
|
"text": "WordSim-353 (Finkelstein et al., 2002)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 422, |
|
"end": 429, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Similarities Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "close the similarity scores to human judgments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Similarities Evaluation", |
|
"sec_num": "4.1" |
|
}, |
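{
"text": "A minimal sketch of this evaluation protocol (our own illustration with made-up vectors and ratings; scipy provides the two correlation measures):\n\nimport numpy as np\nfrom scipy.stats import pearsonr, spearmanr\n\ndef cosine(u, v):\n    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))\n\n# toy embeddings and human similarity ratings for three word pairs (illustrative values)\nemb = {'cat': np.array([0.9, 0.1, 0.0]), 'dog': np.array([0.8, 0.2, 0.1]),\n       'car': np.array([0.1, 0.9, 0.3]), 'truck': np.array([0.2, 0.8, 0.4])}\npairs = [('cat', 'dog'), ('car', 'truck'), ('cat', 'car')]\nhuman = [8.5, 8.0, 2.0]\n\nscores = [cosine(emb[a], emb[b]) for a, b in pairs]\nprint('Pearson gamma:', pearsonr(scores, human)[0])\nprint('Spearman rho:', spearmanr(scores, human)[0])",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Word Similarities Evaluation",
"sec_num": "4.1"
},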
|
{ |
|
"text": "For both joint learning and retrofitting, we test our approach with using PPDB and WN syn as the prior knowledge applied to our regularizer. Considering that LDA can be regarded as soft clustering for words, it is very hard to present words with deterministic relations like in PPDB and WN syn , therefore we do not apply retrofitting on LDA results for previous studies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Similarities Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The evaluation results are shown in Table 1 and Table 2 for joint learning and retrofitting, respectively. Each block in the tables indicates an embedding type and its corresponding enhancement approaches. For comparison, we also include the results from the approaches proposed in previous studies, i.e., Yu and Dredze (2014) 9 for CBOW, Kiela et al. (2015) 10 for SG and Faruiqui et al. (2015) 11 for all initial embeddings. Their settings are equal to that used in our approach. Table 1 shows that directly using LDA topic distributions as embeddings can give reasonable results for word similarities. Because LDA captures word co-occurrences globally so that words share similar contexts are encoded similarly via topic distributions. This is a good indication showing that LDA could be a useful guidance to help our regularize to incorporate global information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 340, |
|
"end": 359, |
|
"text": "Kiela et al. (2015)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 56, |
|
"text": "Table 1 and Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 483, |
|
"end": 490, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Similarities Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For other joint learning results in Table 1 , our approach shows significant gain over the baselines, the same for the approaches from previous studies (Yu and Dredze, 2014; Faruqui et al., 2015) . However, using WN syn in Kiela et al. (2015) does not help, this may owe to the fact that using the words defined in WN syn as contexts will affect the real context learning and thus deviate the joint objective function. Interestingly, using LDA in regularizer significantly boosts the performance on MEN-3k, even better than that with using semantic lexicons. The reason might be that LDA enhances word embeddings with the relatedness inherited in topic distributions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 173, |
|
"text": "(Yu and Dredze, 2014;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 174, |
|
"end": 195, |
|
"text": "Faruqui et al., 2015)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 242, |
|
"text": "Kiela et al. (2015)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 43, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Similarities Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For retrofitting, Table 2 shows that our approach demonstrates its effectiveness for enhancing initial embeddings with prior knowledge. It performs consistently better than all other approaches in a wide range of settings, including three embedding types on three datasets, with few exceptions. Since retrofitting only updates those words in the external sources, e.g., LDA word list or lexicons, it is very sensitive to the quality of the corresponding sources. Consequently, it can be observed from our experiment that unannotated knowledge, i.e., topic distributions, is not an effective source as a good guidance. In contrast, PPDB, which is of high quality of semantic knowledge, outperforms other types of information in most cases.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 25, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Similarities Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We perform sentiment classification on the IMDB review data set (Maas et al., 2011) , which has 50K labeled samples with equal number of positive and negative reviews. The data set is pre-divided into training and test sets, with each set containing 25K reviews. The classifier is based on a bi-directional LSTM model as described in Dai and Le 2015, with one hidden layer of 1024 units. Embeddings from different approaches are used as inputs for the LSTM classifier. For determining the hyperparameters (e.g., training epoch and learning rate), we use 15% of the training data as the validation set and we apply early stopping strategy when the error rate on the validation set starts to increase. Note that the final model for testing is trained on the entire training set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 83, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Evaluation", |
|
"sec_num": "4.2" |
|
}, |
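{
"text": "Such a classifier can be sketched in PyTorch as follows (our own illustration, not the authors' model): a bi-directional LSTM with one hidden layer of 1024 units over pre-trained embeddings and a binary output layer; the vocabulary size, sequence length and random inputs are placeholders.\n\nimport torch\nimport torch.nn as nn\n\nclass BiLSTMClassifier(nn.Module):\n    # bi-directional LSTM over pre-trained word embeddings, one hidden layer of\n    # 1024 units, last hidden state fed to a positive/negative output layer\n    def __init__(self, pretrained, hidden=1024):\n        super().__init__()\n        self.emb = nn.Embedding.from_pretrained(pretrained, freeze=False)\n        self.lstm = nn.LSTM(pretrained.size(1), hidden, batch_first=True, bidirectional=True)\n        self.out = nn.Linear(2 * hidden, 2)\n\n    def forward(self, token_ids):\n        h, _ = self.lstm(self.emb(token_ids))\n        return self.out(h[:, -1, :])  # logits from the last time step\n\nvocab_size, dim = 1000, 200\npretrained = torch.randn(vocab_size, dim)  # stand-in for embeddings learned with our framework\nmodel = BiLSTMClassifier(pretrained)\nlogits = model(torch.randint(0, vocab_size, (4, 50)))  # a batch of 4 reviews, 50 tokens each\nprint(logits.shape)  # torch.Size([4, 2])",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sentiment Classification Evaluation",
"sec_num": "4.2"
},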
|
{ |
|
"text": "As reported in Table 3 , the embeddings trained by our approach work effectively for sentiment classification. Both joint learning and retrofitting with our regularizer outperform other baseline approaches from previous studies, with joint learning being somewhat better than retrofitting. Overall, our joint learning with CBOW achieves the best performance on this task. A ten-partition twotailed paired t-test at p < 0.05 level is performed on comparing each score with the baseline result for each embedding type. Considering that sentiment is not directly related to word meaning, the results indicate that our regularizer is capable of incorporating different type of knowledge for a specific task, even if it is not aligned with the context learning. This task demonstrates the potential of our framework for encoding external knowledge and using it to enrich the representa-Embeddings Accuracy Maas et al. (2011) 88.89 GloVe 90.66 Faruqui et al. (2015) (Maas et al., 2011) . Bold indicates the highest score for each embedding type. * indicates t-test significance at p < 0.05 level when compared with the baseline.", |
|
"cite_spans": [ |
|
{ |
|
"start": 901, |
|
"end": 919, |
|
"text": "Maas et al. (2011)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 938, |
|
"end": 959, |
|
"text": "Faruqui et al. (2015)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 960, |
|
"end": 979, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 22, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Evaluation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "tions of words, without the requirement to build a task-specific, customized model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Evaluation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Early research on representing words as distributed continuous vectors dates back to Rumelhart et al. (1986) . Recent previous studies (Collobert and Weston, 2008; Collobert et al., 2011) showed that, the quality of embeddings can be improved when training multi-task deep models on task-specific corpora, domain knowledge that is learned over the process. Yet one downside is that huge amounts of labeled data is often required. Another methodology is to update embeddings by learning with external knowledge. Joint learning and retrofitting are two mainstreams of this methodology. Leveraging semantic lexicons (Yu and Dredze, 2014; Faruqui et al., 2015; Liu et al., 2015a; Kiela et al., 2015; Wieting et al., 2015; Nguyen et al., 2016) or word distributional information (Maas et al., 2011; Liu et al., 2015b) has been proven as effective in enhancing word embeddings, especially for specific downstream tasks. proposed to improve embedding learning with different kinds of knowledge, such as morphological, syntactic and semantic information. Wieting et al. (2015) improves embeddings by leveraging paraphrase pairs from the PPDB for learning phrase embeddings in the paraphrasing task. In a similar way, Hill et al. (2016) uses learned word embeddings as supervised knowledge for learning phrase embeddings. Although our approach is conceptually similar to previous work, it is different in several ways. For leveraging unlabeled data, the regularizer in this work is different from applying topic distributions as word vectors (Maas et al., 2011) or treating topics as conditional contexts (Liu et al., 2015b) . For leveraging semantic knowledge, our regularizer does not require explicit word relations as used in previous studies (Yu and Dredze, 2014; Faruqui et al., 2015; Kiela et al., 2015) , but takes encoded information of words. Moreover, in order to appropriately learn the encoded information, we use trajectory softmax to perform the regularization. As a result, it provides a versatile data structure to incorporate any vectorized information into embedding learning. The above novelties make our approach versatile so that it can integrate different types of knowledge.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 108, |
|
"text": "Rumelhart et al. (1986)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 150, |
|
"end": 163, |
|
"text": "Weston, 2008;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 187, |
|
"text": "Collobert et al., 2011)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 634, |
|
"text": "(Yu and Dredze, 2014;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 635, |
|
"end": 656, |
|
"text": "Faruqui et al., 2015;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 675, |
|
"text": "Liu et al., 2015a;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 695, |
|
"text": "Kiela et al., 2015;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 696, |
|
"end": 717, |
|
"text": "Wieting et al., 2015;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 718, |
|
"end": 738, |
|
"text": "Nguyen et al., 2016)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 793, |
|
"text": "(Maas et al., 2011;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 794, |
|
"end": 812, |
|
"text": "Liu et al., 2015b)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1047, |
|
"end": 1068, |
|
"text": "Wieting et al. (2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1209, |
|
"end": 1227, |
|
"text": "Hill et al. (2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1533, |
|
"end": 1552, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1596, |
|
"end": 1615, |
|
"text": "(Liu et al., 2015b)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1738, |
|
"end": 1759, |
|
"text": "(Yu and Dredze, 2014;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1760, |
|
"end": 1781, |
|
"text": "Faruqui et al., 2015;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1782, |
|
"end": 1801, |
|
"text": "Kiela et al., 2015)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper we proposed a regularization framework for improving the learning of word embeddings with explicit integration of prior knowledge. Our approach can be used independently as a retrofitter or jointly with CBOW and SG to encode prior knowledge. We proposed trajectory softmax for learning over the regularizer, which can greatly reduce the space complexity compared to hierarchical softmax using the Huffman coding tree, which enables the regularizer to learn over a long vector. Moreover, the regularizer can be constructed from either unlabeled data (e.g., LDA trained from the base corpus) or manually crafted resources such as a lexicon. Experiments on word similarity evaluation and sentiment classification show the benefits of our approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For the future work, we plan to evaluate the effectiveness of this framework with other types of prior knowledge and NLP tasks. We also want to explore different ways of encoding external knowledge for regularization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In their study, joint learning was reported to be less effective than retrofitting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We experimented with other numbers for K, and their performance didn't vary too much when K > 40. We didn't include this comparison due to the similar results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "It is a Markov Chain Monte Carlo (MCMC) based LDA using Gibbs sampling.4 We use PPDB-XL in this paper.5 We use WNsyn because in our experiment only using synonyms perform better than using synonyms, hypernyms and hyponyms.6 The lexicons are organized in the similar way as inFaruqui et al. (2015), where synonyms are grouped together and treated as a document for LDA learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This corpus is constructed by the script demo-train-bigmodel-v1.sh from https://storage.googleapis.com/googlecode-archive-source/v2/code.google.com/word2vec/sourcearchive.zip 8 For LDA embeddings (topic distributions), we tried Jenson-Shannon divergence, which is much worse than cosine scores in measuring the similarity. Therefore we still use cosine for LDA embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/Gorov/JointRCM 10 We re-implemented their approach in our own code. 11 https://github.com/mfaruqui/retrofitting", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "SentiWordNet 3.0: An Enhanced Lexical Resource for Sentiment Analysis and Opinion Mining", |
|
"authors": [ |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Baccianella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Esuli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the Seventh conference on International Language Resources and Evaluation (LREC'10). European Language Resources Association (ELRA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefano Baccianella, Andrea Esuli, and Fabrizio Sebas- tiani. 2010. SentiWordNet 3.0: An Enhanced Lex- ical Resource for Sentiment Analysis and Opinion Mining. In Proceedings of the Seventh conference on International Language Resources and Evalua- tion (LREC'10). European Language Resources As- sociation (ELRA), Valletta, Malta.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "A neural probabilistic language model", |
|
"authors": [ |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9jean", |
|
"middle": [], |
|
"last": "Ducharme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Janvin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "J. Mach. Learn. Res", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "1137--1155", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Janvin. 2003. A neural probabilistic lan- guage model. J. Mach. Learn. Res. 3:1137-1155.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Knowledge-Powered Deep Learning for Word Embedding", |
|
"authors": [ |
|
{ |
|
"first": "Jiang", |
|
"middle": [], |
|
"last": "Bian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tie-Yan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the European Conference on Machine Learning and Knowledge Discovery in Databases", |
|
"volume": "8724", |
|
"issue": "", |
|
"pages": "132--148", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiang Bian, Bin Gao, and Tie-Yan Liu. 2014. Knowledge-Powered Deep Learning for Word Em- bedding. In Proceedings of the European Confer- ence on Machine Learning and Knowledge Discov- ery in Databases -Volume 8724. New York, NY, USA, ECML PKDD 2014, pages 132-148.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Latent Dirichlet Allocation", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "993--1022", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David M. Blei, Andrew Y. Ng, and Michael I. Jordan. 2003. Latent Dirichlet Allocation. Journal of Ma- chine Learning Research 3:993-1022.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Distributional Semantics in Technicolor", |
|
"authors": [ |
|
{ |
|
"first": "Elia", |
|
"middle": [], |
|
"last": "Bruni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gemma", |
|
"middle": [], |
|
"last": "Boleda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nam Khanh", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "136--145", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elia Bruni, Gemma Boleda, Marco Baroni, and Nam Khanh Tran. 2012. Distributional Semantics in Technicolor. In Proceedings of the 50th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers). Jeju Island, Ko- rea, pages 136-145.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A unified architecture for natural language processing: Deep neural networks with multitask learning", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 25th International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "160--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert and Jason Weston. 2008. A unified architecture for natural language processing: Deep neural networks with multitask learning. In Pro- ceedings of the 25th International Conference on Machine Learning. ACM, New York, NY, USA, ICML '08, pages 160-167.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Natural Language Processing (Almost) from Scratch", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e9on", |
|
"middle": [], |
|
"last": "Bottou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Karlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Kuksa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2493--2537", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural Language Processing (Almost) from Scratch. Journal of Machine Learning Research 12:2493-2537.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Semisupervised Sequence Learning", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems 28: Annual Conference on Neural Information Processing Systems 2015", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3079--3087", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew M. Dai and Quoc V. Le. 2015. Semi- supervised Sequence Learning. In Advances in Neural Information Processing Systems 28: Annual Conference on Neural Information Processing Sys- tems 2015, December 7-12, 2015, Montreal, Que- bec, Canada. pages 3079-3087.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Retrofitting word vectors to semantic lexicons", |
|
"authors": [ |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Dodge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujay", |
|
"middle": [], |
|
"last": "Kumar Jauhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1606--1615", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manaal Faruqui, Jesse Dodge, Sujay Kumar Jauhar, Chris Dyer, Eduard Hovy, and Noah A. Smith. 2015. Retrofitting word vectors to semantic lexicons. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies. Denver, Colorado, pages 1606-1615.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Placing Search in Context: the Concept Revisited", |
|
"authors": [ |
|
{ |
|
"first": "Lev", |
|
"middle": [], |
|
"last": "Finkelstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yossi", |
|
"middle": [], |
|
"last": "Matias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Rivlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zach", |
|
"middle": [], |
|
"last": "Solan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gadi", |
|
"middle": [], |
|
"last": "Wolfman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eytan", |
|
"middle": [], |
|
"last": "Ruppin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM Transaction on Information Systems", |
|
"volume": "20", |
|
"issue": "1", |
|
"pages": "116--131", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev Finkelstein, Evgeniy Gabrilovich, Yossi Matias, Ehud Rivlin, Zach Solan, Gadi Wolfman, and Ey- tan Ruppin. 2002. Placing Search in Context: the Concept Revisited. ACM Transaction on Informa- tion Systems 20(1):116-131.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "PPDB: The Paraphrase Database", |
|
"authors": [ |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "758--764", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juri Ganitkevitch, Benjamin Van Durme, and Chris Callison-Burch. 2013. PPDB: The Paraphrase Database. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies. Atlanta, Georgia, pages 758-764.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Learning to Understand Phrases by Embedding the Dictionary. Transactions of the Association for", |
|
"authors": [ |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "17--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felix Hill, KyungHyun Cho, Anna Korhonen, and Yoshua Bengio. 2016. Learning to Understand Phrases by Embedding the Dictionary. Transac- tions of the Association for Computational Linguis- tics 4:17-30.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Simlex-999: Evaluating Semantic Models with Genuine Similarity Estimation", |
|
"authors": [ |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roi", |
|
"middle": [], |
|
"last": "Reichart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Computational Linguistics", |
|
"volume": "41", |
|
"issue": "4", |
|
"pages": "665--695", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felix Hill, Roi Reichart, and Anna Korhonen. 2015. Simlex-999: Evaluating Semantic Models with Gen- uine Similarity Estimation. Computational Linguis- tics 41(4):665-695.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Specializing word embeddings for similarity or relatedness", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2044--2048", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Felix Hill, and Stephen Clark. 2015. Specializing word embeddings for similarity or re- latedness. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Process- ing. Lisbon, Portugal, pages 2044-2048.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Learning semantic word embeddings based on ordinal knowledge constraints", |
|
"authors": [ |
|
{ |
|
"first": "Quan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Si", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhen-Hua", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1501--1511", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Quan Liu, Hui Jiang, Si Wei, Zhen-Hua Ling, and Yu Hu. 2015a. Learning semantic word embed- dings based on ordinal knowledge constraints. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers). Beijing, China, pages 1501-1511.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Tat-Seng Chua, and Maosong Sun", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tat-Seng", |
|
"middle": [], |
|
"last": "Chua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence. AAAI'15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2418--2424", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu, Zhiyuan Liu, Tat-Seng Chua, and Maosong Sun. 2015b. Topical Word Embeddings. In Pro- ceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence. AAAI'15, pages 2418-2424.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Learning Word Vectors for Sentiment Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Maas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Daly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning Word Vectors for Sentiment Anal- ysis. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies. Portland, Oregon, USA, pages 142-150.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Efficient Estimation of Word Representations in Vector Space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013a. Efficient Estimation of Word Representations in Vector Space. arXiv preprint abs/1301.3781.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Exploiting Similarities among Languages for Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Quoc V. Le, and Ilya Sutskever. 2013b. Exploiting Similarities among Lan- guages for Machine Translation. arXiv preprint abs/1309.4168.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "26", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013c. Distributed representa- tions of words and phrases and their composition- ality. In C.J.C. Burges, L. Bottou, M. Welling, Z. Ghahramani, and K.Q. Weinberger, editors, Ad- vances in Neural Information Processing Systems 26, pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Linguistic Regularities in Continuous Space Word Representations", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yih", |
|
"middle": [], |
|
"last": "Wen-Tau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "746--751", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Wen-tau Yih, and Geoffrey Zweig. 2013d. Linguistic Regularities in Continuous Space Word Representations. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Association for Computa- tional Linguistics, Atlanta, Georgia, pages 746-751.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "WordNet: A Lexical Database for English", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Commun. ACM", |
|
"volume": "38", |
|
"issue": "11", |
|
"pages": "39--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George A. Miller. 1995. WordNet: A Lexical Database for English. Commun. ACM 38(11):39-41.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Integrating distributional lexical contrast into word embeddings for antonymsynonym distinction", |
|
"authors": [ |
|
{ |
|
"first": "Kim Anh", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Schulte im Walde", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngoc Thang", |
|
"middle": [], |
|
"last": "Vu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "454--459", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kim Anh Nguyen, Sabine Schulte im Walde, and Ngoc Thang Vu. 2016. Integrating distributional lexical contrast into word embeddings for antonym- synonym distinction. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers). Berlin, Ger- many, pages 454-459.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Glove: Global Vectors for Word Representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global Vectors for Word Representation. In Proceedings of the 2014 Con- ference on Empirical Methods in Natural Language Processing (EMNLP). Doha, Qatar, pages 1532- 1543.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Learning Representations by Backpropagating Errors", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Rumelhart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ronald", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1986, |
|
"venue": "Nature", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "533--536", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David E. Rumelhart, Geoffrey E. Hinton, and Ronald J. Williams. 1986. Learning Representations by Back- propagating Errors. Nature pages 533-536.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "From Frequency to Meaning: Vector Space Models of Semantics", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Turney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Pantel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "37", |
|
"issue": "1", |
|
"pages": "141--188", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D. Turney and Patrick Pantel. 2010. From Fre- quency to Meaning: Vector Space Models of Se- mantics. Journal of Artificial Intelligence Research 37(1):141-188.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumit", |
|
"middle": [], |
|
"last": "Chopra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Weston, Antoine Bordes, Sumit Chopra, and Tomas Mikolov. 2015. Towards AI-Complete Ques- tion Answering: A Set of Prerequisite Toy Tasks. arXiv preprint abs/1502.05698.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "From Paraphrase Database to Compositional Paraphrase Model and Back. Transactions of the Association for", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Wieting", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Livescu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "345--358", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Wieting, Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2015. From Paraphrase Database to Com- positional Paraphrase Model and Back. Transac- tions of the Association for Computational Linguis- tics 3:345-358.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "RC-NET: A General Framework for Incorporating Knowledge into Word Representations", |
|
"authors": [ |
|
{ |
|
"first": "Chang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yalong", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiang", |
|
"middle": [], |
|
"last": "Bian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoguang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tie-Yan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 23rd ACM International Conference on Conference on Information and Knowledge Management", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "1219--1228", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chang Xu, Yalong Bai, Jiang Bian, Bin Gao, Gang Wang, Xiaoguang Liu, and Tie-Yan Liu. 2014. RC-NET: A General Framework for Incorporating Knowledge into Word Representations. In Proceed- ings of the 23rd ACM International Conference on Conference on Information and Knowledge Man- agement. ACM, New York, NY, USA, CIKM '14, pages 1219-1228.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Improving lexical embeddings with semantic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "545--550", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mo Yu and Mark Dredze. 2014. Improving lexical embeddings with semantic knowledge. In Proceed- ings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Pa- pers). Baltimore, Maryland, pages 545-550.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Comparison of hierarchical softmax (left) and trajectory softmax (right) based on an example of eight words in binary coding. The bold arrow lines refer to the path for encoding w 5 in both hierarchical and trajectory softmax.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "58.86 20.39 22.12 55.48 54.81 CBOW 62.93 65.84 28.34 28.31 68.50 66.67 Yu and Dredze (2014) +PPDB 65.35 65.84 35.56 33.30 72.75 72.43 +WN syn 65.20 65.74 36.15 33.65 72.79 72.58 +WN syn 66.58 68.14 36.72 35.91 68.50 67.90", |
|
"content": "<table><tr><td>Embeddings</td><td/><td>MEN-3k \u03b3 \u03c1</td><td>SimLex-999 WordSim-353 \u03b3 \u03c1 \u03b3 \u03c1</td></tr><tr><td colspan=\"4\">LDA 57.17 This work +LDA 67.33 69.51 29.79 29.78 71.19 69.58 +PPDB 65.25 66.87 36.43 33.28 69.45 68.89</td></tr><tr><td/><td colspan=\"3\">+WN syn 64.42 66.98 33.86 33.69 66.13 67.11</td></tr><tr><td>SG</td><td/><td colspan=\"2\">64.79 66.71 26.97 26.59 68.88 67.80</td></tr><tr><td>Kiela et al. (2015)</td><td colspan=\"3\">+PPDB +WN syn 57.02 59.84 29.02 29.99 63.61 61.22 61.13 60.04 36.47 34.29 70.14 68.76</td></tr><tr><td/><td>+LDA</td><td colspan=\"2\">65.02 65.32 25.19 24.04 66.16 69.21</td></tr><tr><td>This work</td><td>+PPDB</td><td colspan=\"2\">70.83 71.35 37.10 35.72 73.94 73.11</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "GloVe 66.84 66.97 28.87 27.52 59.78 61.46 Faruqui et al. (2015) +PPDB 66.98 67.04 29.25 28.25 61.44 63.35 +WN syn 64.29 63.92 27.32 24.39 57.40 58.88 65.52 36.16 34.01 72.75 72.39 +WN syn 65.34 65.77 35.68 33.33 72.72 72.74 69.09 34.93 34.57 72.63 71.15 +WN syn 65.62 67.38 29.96 29.82 69.70 68.91", |
|
"content": "<table><tr><td>Embeddings</td><td/><td>MEN-3k \u03b3 \u03c1</td><td>SimLex-999 WordSim-353 \u03b3 \u03c1 \u03b3 \u03c1</td></tr><tr><td/><td>+LDA</td><td colspan=\"2\">59.65 60.23 22.25 22.70 55.65 57.57</td></tr><tr><td>This work</td><td>+PPDB</td><td colspan=\"2\">68.99 68.99 31.35 29.85 62.31 63.96</td></tr><tr><td/><td colspan=\"3\">+WN syn 66.72 66.84 29.78 28.47 59.62 61.34</td></tr><tr><td>CBOW</td><td/><td colspan=\"2\">62.93 65.84 28.34 28.31 68.50 66.67</td></tr><tr><td colspan=\"4\">Yu and Dredze (2014) 65.08 Faruqui et al. (2015) +PPDB +PPDB 65.07 67.55 37.07 35.02 71.76 71.18 +WN syn 63.71 66.44 30.15 29.83 71.24 69.39</td></tr><tr><td/><td>+LDA</td><td colspan=\"2\">50.07 56.64 21.47 23.01 41.56 47.27</td></tr><tr><td>This work</td><td>+PPDB</td><td colspan=\"2\">65.30 67.68 37.34 35.74 72.01 72.05</td></tr><tr><td/><td colspan=\"3\">+WN syn 63.89 66.74 33.96 33.82 68.70 66.91</td></tr><tr><td>SG</td><td/><td colspan=\"2\">64.79 66.71 26.97 26.59 68.88 67.80</td></tr><tr><td>Kiela et al. (2015)</td><td colspan=\"3\">+PPDB +WN syn 64.58 67.02 29.43 28.12 69.15 68.36 67.38 69.05 32.49 31.84 71.59 69.82</td></tr><tr><td>Faruqui et al. (2015)</td><td colspan=\"3\">+PPDB +WN syn 65.65 66.71 28.25 27.61 70.21 69.47 65.44 67.02 34.12 33.72 71.24 70.31</td></tr><tr><td/><td>+LDA</td><td colspan=\"2\">64.02 65.33 24.64 24.28 59.43 60.60</td></tr><tr><td>This work</td><td>+PPDB</td><td>67.17</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Sentiment classification results on IMDB data set", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |