|
{ |
|
"paper_id": "E14-1049", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:39:05.012696Z" |
|
}, |
|
"title": "Improving Vector Space Word Representations Using Multilingual Correlation", |
|
"authors": [ |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University Pittsburgh", |
|
"location": { |
|
"postCode": "15213", |
|
"region": "PA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The distributional hypothesis of Harris (1954), according to which the meaning of words is evidenced by the contexts they occur in, has motivated several effective techniques for obtaining vector space semantic representations of words using unannotated text corpora. This paper argues that lexico-semantic content should additionally be invariant across languages and proposes a simple technique based on canonical correlation analysis (CCA) for incorporating multilingual evidence into vectors generated monolingually. We evaluate the resulting word representations on standard lexical semantic evaluation tasks and show that our method produces substantially better semantic representations than monolingual techniques.", |
|
"pdf_parse": { |
|
"paper_id": "E14-1049", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The distributional hypothesis of Harris (1954), according to which the meaning of words is evidenced by the contexts they occur in, has motivated several effective techniques for obtaining vector space semantic representations of words using unannotated text corpora. This paper argues that lexico-semantic content should additionally be invariant across languages and proposes a simple technique based on canonical correlation analysis (CCA) for incorporating multilingual evidence into vectors generated monolingually. We evaluate the resulting word representations on standard lexical semantic evaluation tasks and show that our method produces substantially better semantic representations than monolingual techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Data-driven learning of vector-space word embeddings that capture lexico-semantic properties is a technique of central importance in natural language processing. Using cooccurrence statistics from a large corpus of text (Deerwester et al., 1990 ; Turney and Pantel, 2010), 1 it is possible to construct high-quality semantic vectors -as judged by both correlations with human judgements of semantic relatedness (Turney, 2006; Agirre et al., 2009) and as features for downstream applications (Turian et al., 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 244, |
|
"text": "(Deerwester et al., 1990", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 411, |
|
"end": 425, |
|
"text": "(Turney, 2006;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 446, |
|
"text": "Agirre et al., 2009)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 491, |
|
"end": 512, |
|
"text": "(Turian et al., 2010)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The observation that vectors representing cooccurrence tendencies would capture meaning is expected according to the distributional hypothesis (Harris, 1954) , famously articulated by Firth (1957) as You shall know a word by the company it keeps. Although there is much evidence in favor of the distributional hypothesis, in this paper we argue for incorporating translational context when constructing vector space semantic models (VSMs). Simply put: knowing how words translate is a valuable source of lexico-semantic information and should lead to better VSMs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 157, |
|
"text": "(Harris, 1954)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 196, |
|
"text": "Firth (1957)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Parallel corpora have long been recognized as valuable for lexical semantic applications, including identifying word senses (Diab, 2003; Resnik and Yarowsky, 1999) and paraphrase and synonymy relationships (Bannard and Callison-Burch, 2005) . The latter work (which we build on) shows that if different words or phrases in one language often translate into a single word or phrase type in a second language, this is good evidence that they are synonymous. To illustrate: the English word forms aeroplane, airplane, and plane are observed to translate into the same Hindi word:", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 136, |
|
"text": "(Diab, 2003;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 137, |
|
"end": 163, |
|
"text": "Resnik and Yarowsky, 1999)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 240, |
|
"text": "(Bannard and Callison-Burch, 2005)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(vaayuyaan). Thus, even if we did not know the relationship between the English words, this translation fact is evidence that they all have the same meaning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "How can we exploit information like this when constructing VSMs? We propose a technique that first constructs independent VSMs in two languages and then projects them onto a common vector space such that translation pairs (as determined by automatic word alignments) should be maximally correlated ( \u00a72). We review latent semantic analysis (LSA), which serves as our monolingual VSM baseline ( \u00a73), and a suite of standard evaluation tasks that we use to measure the quality of the embeddings ( \u00a74). We then turn to experiments. We first show that our technique leads to substantial improvements over monolingual LSA ( \u00a75), and then examine how our technique fares with vectors learned using two different neural networks, one that models word sequences and a second that models bags-of-context words. We observe substantial improvements over the sequential model using multilingual evidence but more mixed results relative to using the bagsof-contexts model ( \u00a76).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To gain information from the translation of a given word in other languages the most basic thing to do would be to just append the given word representation with the word representations of its translation in the other language. This has three drawbacks: first, it increases the number of dimensions in the vector; second, it can pull irrelevant information from the other language that doesn't generalize across languages and finally the given word might be out of vocabulary of the parallel corpus or dictionary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To counter these problems we use CCA 2 which is a way of measuring the linear relationship between two multidimensional variables. It finds two projection vectors, one for each variable, that are optimal with respect to correlations. The dimensionality of these new projected vectors is equal to or less than the smaller dimensionality of the two variables.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Let \u03a3 \u2208 R n 1 \u00d7d 1 and \u2126 \u2208 R n 2 \u00d7d 2 be vector space embeddings of two different vocabularies where rows represent words. Since the two vocabularies are of different sizes (n 1 and n 2 ) and there might not exist translation for every word of \u03a3 in \u2126, let \u03a3 \u2286 \u03a3 where every word in \u03a3 is translated to one other word 3 in \u2126 \u2286 \u2126 and \u03a3 \u2208 R n\u00d7d 1 and \u2126 \u2208 R n\u00d7d 2 . Let x and y be two corresponding vectors from \u03a3 and \u2126 , and v and w be two projection directions. Then, the projected vectors are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "x = xv y = yw", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "(1) and the correlation between the projected vectors can be written as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03c1(x , y ) = E[x y ] E[x 2 ]E[y 2 ]", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "CCA maximizes \u03c1 for the given set of vectors \u03a3 and \u2126 and outputs two projection vectors v and w:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "v, w = CCA(x, y) = arg max v,w \u03c1(xv, yw)", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Using these two projection vectors we can project the entire vocabulary of the two languages \u03a3 and \u2126 using equation 1. Summarizing:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "V , W = CCA(\u03a3 , \u2126 )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u03a3 * = \u03a3V \u2126 * = \u2126W (5) where, V \u2208 R d 1 \u00d7d , W \u2208 R d 2 \u00d7d con- tain the projection vectors and d = min{rank(V ), rank(W )}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Thus, the resulting vectors cannot be longer than the original vectors. Since V and W can be used to project the whole vocabulary, CCA also solves the problem of not having translations of a particular word in the dictionary. The schema of performing CCA on the monolingual word representations of two languages is shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 324, |
|
"end": 332, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Further Dimensionality Reduction: Since CCA gives us correlations and corresponding projection vectors across d dimensions which can be large, we perform experiments by taking projections of the original word vectors across only the top k correlated dimensions. This is trivial to implement as the projection vectors V , W in equation 4 are already sorted in descending order of correlation. Therefore in,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u03a3 * k = \u03a3V k \u2126 * k = \u2126W k (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u03a3 * k and \u2126 * k are now word vector projections along the top k correlated dimensions, where, V k and W k are the column truncated matrices.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual Correlation with CCA", |
|
"sec_num": "2" |
|
}, |
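
{

"text": "To make the procedure concrete, the following is a minimal numpy sketch of equations (4)-(6), assuming the rows of S and O are already paired by the translation dictionary; the function names and the whitening-based construction are our own (the paper itself uses MATLAB's canoncorr):\n\nimport numpy as np\n\ndef inv_sqrt(C, eps=1e-12):\n    # Inverse square root of a symmetric PSD matrix via eigendecomposition.\n    w, Q = np.linalg.eigh(C)\n    return Q @ np.diag(1.0 / np.sqrt(np.maximum(w, eps))) @ Q.T\n\ndef cca_project(S, O, k):\n    # S: n x d1 and O: n x d2 matrices of paired (translated) word vectors.\n    S = S - S.mean(axis=0)\n    O = O - O.mean(axis=0)\n    Wss, Woo = inv_sqrt(S.T @ S), inv_sqrt(O.T @ O)\n    # SVD of the whitened cross-covariance: the singular values are the\n    # canonical correlations, already sorted in descending order.\n    U, rho, Vt = np.linalg.svd(Wss @ (S.T @ O) @ Woo)\n    V = Wss @ U[:, :k]      # d1 x k projection for language one\n    W = Woo @ Vt.T[:, :k]   # d2 x k projection for language two\n    return V, W, rho[:k]\n\n# The full vocabularies (not just the dictionary words) are then projected:\n# Sigma_star = Sigma @ V;  Omega_star = Omega @ W",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multilingual Correlation with CCA",

"sec_num": "2"

},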
|
{ |
|
"text": "We perform latent semantic analysis (Deerwester et al., 1990 ) on a word-word co-occurrence matrix. We construct a word co-occurrence frequency matrix F for a given training corpus where each row w, represents one word in the corpus and every column c, is the context feature in which the word is observed. In our case, every column is a word which occurs in a given window length around the target word. For scalability reasons, we only select words with frequency greater than 10 as features. We also remove the top 100 most frequent words (mostly stop words) from the column features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 60, |
|
"text": "(Deerwester et al., 1990", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We then replace every entry in the sparse frequency matrix F by its pointwise mutual information (PMI) (Church and Hanks, 1990; Turney, 2001 ) resulting in X. PMI is designed to give a high value to x ij where there is a interesting relation between w i and c j , a small or negative value of x ij indicates that the occurrence of w i in c j is uninformative. Finally, we factorize the matrix X using singular value decomposition (SVD). SVD decomposes X into the product of three matrices:", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 127, |
|
"text": "(Church and Hanks, 1990;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 128, |
|
"end": 140, |
|
"text": "Turney, 2001", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "X = U \u03a8V (7)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where, U and V are in column orthonormal form and \u03a8 is a diagonal matrix of singular values (Golub and Van Loan, 1996) . We obtain a reduced dimensional representation of words from size |V | to k:", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 118, |
|
"text": "(Golub and Van Loan, 1996)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "A = U k \u03a8 k (8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where k can be controlled to trade off between reconstruction error and number of parameters, \u03a8 k is the diagonal matrix containing the top k singular values, U k is the matrix produced by selecting the corresponding columns from U and A represents the new matrix containing word vector representations in the reduced dimensional space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Latent Semantic Analysis", |
|
"sec_num": "3" |
|
}, |
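
{

"text": "As an illustration of this pipeline, here is a minimal numpy sketch of the PMI transformation and the truncated SVD of equations (7)-(8); it works on a dense count matrix for brevity, whereas a corpus-scale implementation would use sparse matrices and a truncated solver:\n\nimport numpy as np\n\ndef lsa_vectors(F, k):\n    # F: word-by-context co-occurrence count matrix.\n    total = F.sum()\n    pw = F.sum(axis=1, keepdims=True) / total  # P(w)\n    pc = F.sum(axis=0, keepdims=True) / total  # P(c)\n    with np.errstate(divide='ignore'):\n        X = np.log((F / total) / (pw * pc))    # pointwise mutual information\n    X[np.isneginf(X)] = 0.0                    # zero counts are uninformative\n    U, psi, _ = np.linalg.svd(X, full_matrices=False)  # X = U Psi V^T\n    return U[:, :k] * psi[:k]                  # A = U_k Psi_k, equation (8)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Latent Semantic Analysis",

"sec_num": "3"

},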
|
{ |
|
"text": "We evaluate the quality of our word vector representations on a number of tasks that test how well they capture both semantic and syntactic aspects of the representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Representation Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We evaluate our word representations on four different benchmarks that have been widely used to measure word similarity. The first one is the WS-353 dataset (Finkelstein et al., 2001 ) containing 353 pairs of English words that have been assigned similarity ratings by humans. This data was further divided into two fragments by Agirre et al. (2009) who claimed that similarity (WS-SIM) and relatedness (WS-REL) are two different kinds of relations and should be dealt with separately. We present results on the whole set and on the individual fragments as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 182, |
|
"text": "(Finkelstein et al., 2001", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 329, |
|
"end": 349, |
|
"text": "Agirre et al. (2009)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Similarity", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The second and third benchmarks are the RG-65 (Rubenstein and Goodenough, 1965) and the MC-30 (Miller and Charles, 1991) datasets that contain 65 and 30 pairs of nouns respectively and have been given similarity rankings by humans. These differ from WS-353 in that it contains only nouns whereas the former contains all kinds of words. The fourth benchmark is the MTurk-287 (Radinsky et al., 2011) dataset that constitutes of 287 pairs of words and is different from the above two benchmarks in that it has been constructed by crowdsourcing the human similarity ratings using Amazon Mechanical Turk.", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 79, |
|
"text": "(Rubenstein and Goodenough, 1965)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 94, |
|
"end": 120, |
|
"text": "(Miller and Charles, 1991)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 397, |
|
"text": "(Radinsky et al., 2011)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Similarity", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We calculate similarity between a given pair of words by the cosine similarity between their corresponding vector representation. We then report Spearman's rank correlation coefficient (Myers and Well, 1995) between the rankings produced by our model against the human rankings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 207, |
|
"text": "(Myers and Well, 1995)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Similarity", |
|
"sec_num": "4.1" |
|
}, |
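
{

"text": "A minimal sketch of this evaluation loop, assuming vectors is a word-to-array dictionary and the helper names are ours:\n\nimport numpy as np\nfrom scipy.stats import spearmanr\n\ndef evaluate_similarity(vectors, pairs, human_scores):\n    # pairs: list of (w1, w2); human_scores: gold similarity ratings.\n    def cosine(a, b):\n        return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))\n    model_scores = [cosine(vectors[w1], vectors[w2]) for w1, w2 in pairs]\n    # Spearman's rank correlation between model and human rankings.\n    return spearmanr(model_scores, human_scores).correlation",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Similarity",

"sec_num": "4.1"

},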
|
{ |
|
"text": "Mikolov et al. (2013a) present a new semantic relation dataset composed of analogous word pairs. It contains pairs of tuples of word relations that follow a common semantic relation. For example, in England : London :: France : Paris, the two given pairs of words follow the country-capital relation. There are three other such kinds of relations: country-currency, man-woman, city-in-state and overall 8869 such pairs of words 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Relations (SEM-REL)", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The task here is to find a word d that best fits the following relationship: a : b :: c : d given a, b and c. We use the vector offset method described in Mikolov et al. (2013a) that computes the vector y = x a \u2212 x b + x c where, x a , x b and x c are word vectors of a, b and c respectively and returns the vector x w from the whole vocabulary which has the highest cosine similarity to y:", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 177, |
|
"text": "Mikolov et al. (2013a)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Relations (SEM-REL)", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "x w = arg max xw x w \u2022 y |x w | \u2022 |y|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Relations (SEM-REL)", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "It is worth noting that this is a non-trivial |V |-way classification task where V is the size of the vocabulary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Relations (SEM-REL)", |
|
"sec_num": "4.2" |
|
}, |
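
{

"text": "A compact sketch of this vector offset search, assuming E holds row-normalized word vectors and vocab maps each word to its row index (following common practice, the three query words are excluded from the argmax, which the text above does not spell out):\n\nimport numpy as np\n\ndef solve_analogy(E, vocab, a, b, c):\n    # a : b :: c : ?  ->  y = x_b - x_a + x_c\n    y = E[vocab[b]] - E[vocab[a]] + E[vocab[c]]\n    sims = E @ (y / np.linalg.norm(y))   # cosine similarity to every word\n    for w in (a, b, c):\n        sims[vocab[w]] = -np.inf         # do not return a query word\n    words = sorted(vocab, key=vocab.get)\n    return words[int(np.argmax(sims))]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Semantic Relations (SEM-REL)",

"sec_num": "4.2"

},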
|
{ |
|
"text": "This dataset contains word pairs that are different syntactic forms of a given word and was prepared by Mikolov et al. (2013a) . For example, in walking and walked, the second word is the past tense of the first word. There are nine such different kinds of relations: adjective-adverb, opposites, comaparative, superlative, presentparticiple, nation-nationality, past tense, plural nouns and plural verbs. Overall there are 10675 such syntactic pairs of word tuples. The task here again is identifying a word d that best fits the following relationship: a : b :: c : d and we solve it using the method described in \u00a74.2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 126, |
|
"text": "Mikolov et al. (2013a)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Syntactic Relations (SYN-REL)", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "For English, German and Spanish we used the WMT-2011 5 monolingual news corpora and for French we combined the WMT-2011 and 2012 6 monolingual news corpora so that we have around 300 million tokens for each language to train the word vectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For CCA, a one-to-one correspondence between the two sets of vectors is required. Obviously, the vocabulary of two languages are of different sizes and hence to obtain one-to-one mapping, for every English word we choose a word from the other language to which it has been aligned the maximum number of times 7 in a parallel corpus. We got these word alignment counts using cdec (Dyer et al., 2010) from the parallel news commentary corpora (WMT 2006-10) combined with the Europarl corpus for English-{German, French, Spanish}.", |
|
"cite_spans": [ |
|
{ |
|
"start": 379, |
|
"end": 398, |
|
"text": "(Dyer et al., 2010)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 454, |
|
"text": "(WMT 2006-10)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
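
{

"text": "A small sketch of how such a one-to-one dictionary can be assembled from alignment links (the pair-extraction step from the cdec output is assumed, not shown):\n\nfrom collections import Counter, defaultdict\n\ndef one_to_one_dictionary(aligned_pairs):\n    # aligned_pairs: iterable of (english_word, foreign_word) alignment links.\n    counts = defaultdict(Counter)\n    for en, fw in aligned_pairs:\n        counts[en][fw] += 1\n    # Keep, for every English word, its most frequently aligned foreign word.\n    return {en: c.most_common(1)[0][0] for en, c in counts.items()}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data",

"sec_num": "5.1"

},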
|
{ |
|
"text": "We construct LSA word vectors of length 640 8 for English, German, French and Spanish. We project the English word vectors using CCA by pairing them with German, French and Spanish vectors. For every language pair we take the top k correlated dimensions (cf. equation 6), where k \u2208 10%, 20%, . . . 100% and tune the performance on WS-353 task. We then select the k that gives us the best average performance across language pairs, which is k = 80%, and evaluate the corresponding vectors on all other benchmarks. This prevents us from over-fitting k for every individual task. Table 1 shows the Spearman's correlation ratio obtained by using word vectors to compute the similarity between two given words and compare the ranked list against human rankings. The first row in the table shows the baseline scores obtained by using only the monolingual English vectors whereas the other rows correspond to the multilingual cases. The last row shows the average performance of the three language pairs. For all the tasks we get at least an absolute gain of 20 points over the baseline. These results are highly assuring of our hypothesis that multilingual context can help in improving the semantic similarity between similar words as described in the example in \u00a71. Results across language pairs remain almost the same and the differences are most of the times statistically insignificant. Table 1 also shows the accuracy obtained on predicting different kinds of relations between word pairs. For the SEM-REL task the average improvement in accuracy is an absolute 30 points over the baseline which is highly statistically significant (p < 0.01) according to the McNemar's test (Dietterich, 1998) . The same holds true for the SYN-REL task where we get an average improvement of absolute 8 points over the baseline across the language pairs. Such an improvement in scores across these relation prediction tasks further enforces our claim that cross-lingual context can be exploited using the method described in \u00a72 and it does help in encoding the meaning of a word better in a word vector than monolingual information alone. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 1675, |
|
"end": 1693, |
|
"text": "(Dietterich, 1998)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 577, |
|
"end": 584, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 1386, |
|
"end": 1393, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "5.2" |
|
}, |
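
{

"text": "The k-selection step above amounts to a small grid search; the following sketch assumes the cca_project and evaluate_similarity helpers from the earlier snippets, with score_fn wrapping the WS-353 evaluation of the projected vectors:\n\ndef tune_k(S, O, d, score_fn):\n    # score_fn(V, W): Spearman correlation of the projected vectors on WS-353.\n    best_frac, best_score = None, float('-inf')\n    for frac in (0.1 * i for i in range(1, 11)):\n        V, W, _ = cca_project(S, O, k=max(1, int(frac * d)))\n        score = score_fn(V, W)\n        if score > best_score:\n            best_frac, best_score = frac, score\n    return best_frac",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methodology",

"sec_num": "5.2"

},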
|
{ |
|
"text": "To understand how multilingual evidence leads to better results in semantic evaluation tasks, we plot the word representations obtained in \u00a73 of several synonyms and antonyms of the word \"beautiful\" by projecting both the transformed and untransformed vectors onto R 2 using the t-SNE tool (van der Maaten and Hinton, 2008) . The untransformed LSA vectors are in the upper part of Fig. 2 , and the CCA-projected vectors are in the lower part. By comparing the two regions, we see that in the untransformed representations, the antonyms are in two clusters separated by the synonyms, whereas in the transformed representation, both the antonyms and synonyms are in their own cluster. Furthermore, the average intra-class distance between synonyms and antonyms is reduced. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 323, |
|
"text": "(van der Maaten and Hinton, 2008)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 381, |
|
"end": 387, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative Example", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "In order to demonstrate that the gains in performance by using multilingual correlation sustains for different number of dimensions, we compared the performance of the monolingual and (German-English) multilingual vectors with k = 80% (cf. \u00a75.2). It can be see in figure 3 that the performance improvement for multilingual vectors remains almost the same for different vector lengths strengthening the reliability of our approach.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 264, |
|
"end": 272, |
|
"text": "figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Variation in Vector Length", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "Other kinds of vectors shown to be useful in many NLP tasks are word embeddings obtained from neural networks. These word embeddings capture more complex information than just co-occurrence counts as explained in the next section. We test our multilingual projection method on two types of such vectors by keeping the experimental setting exactly the same as in \u00a75.2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Network Word Representations", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The recurrent neural network language model maximizes the log-likelihood of the training corpus. The architecture (Mikolov et al., 2013b) consists of an input layer, a hidden layer with recurrent connections to itself, an output layer and the corresponding weight matrices. The input vector w(t) represents input word at time t encoded using 1-of-N encoding and the output layer y(t) produces a probability distribution over words in the vocabulary V . The hidden layer maintains a representation of the sentence history in s(t). The values in the hidden and output layer are computed as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 137, |
|
"text": "(Mikolov et al., 2013b)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RNN Vectors", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s(t) = f(U w(t) + W s(t \u2212 1))", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "RNN Vectors", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y(t) = g(V s(t))", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "RNN Vectors", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "where, f and g are the logistic and softmax functions respectively. U and V are weight matrices and the word representations are found in the columns of U . The model is trained using backpropagation. Training such a purely lexical model will induce representations with syntactic and semantic properties. We use the RNNLM toolkit 9 to induce these word representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RNN Vectors", |
|
"sec_num": "6.1" |
|
}, |
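
{

"text": "A single step of this recurrence, written out as a numpy sketch of equations (9)-(10); w_t is the 1-of-N input vector, and the matrix shapes are our assumption:\n\nimport numpy as np\n\ndef rnn_step(w_t, s_prev, U, W, V):\n    def sigmoid(z):\n        return 1.0 / (1.0 + np.exp(-z))\n    s_t = sigmoid(U @ w_t + W @ s_prev)   # hidden state, equation (9)\n    z = V @ s_t\n    y_t = np.exp(z - z.max())\n    y_t /= y_t.sum()                      # softmax over the vocabulary, equation (10)\n    return s_t, y_t",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "RNN Vectors",

"sec_num": "6.1"

},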
|
{ |
|
"text": "In the RNN model ( \u00a76.1) most of the complexity is caused by the non-linear hidden layer. This is avoided in the new model proposed in Mikolov et al. (2013a) where they remove the non-linear hidden layer and there is a single projection layer for the input word. Precisely, each current word is used as an input to a log-linear classifier with continuous projection layer and words within a certain range before and after the word are predicted. These vectors are called the skip-gram (SG) vectors. We used the tool 10 for obtaining these word vectors with default settings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 157, |
|
"text": "Mikolov et al. (2013a)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Skip Gram Vectors", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We compare the best results obtained by using different types of monolingual word representations across all language pairs. For brevity we do not show the results individually for all language pairs as they follow the same pattern when compared to the baseline for every vector type. We train word vectors of length 80 because it was computationally intractable to train the neural embeddings for higher dimensions. For multilingual vectors, we obtain k = 60% (cf. \u00a75.2). Table 2 shows the correlation ratio and the accuracies for the respective evaluation tasks. For the RNN vectors the performance improves upon inclusion of multilingual context for almost all tasks except for SYN-REL where the loss is statistically significant (p < 0.01). For MC-30 and SEM-REL the small drop in performance is not statistically significant. Interestingly, the performance gain/loss for the SG vectors in most of the cases is not statistically significant, which means that inclusion of multilingual context is not very helpful. In fact, for SYN-REL the loss is statistically significant (p < 0.05) which is similar to the performance of RNN case. Overall, the best results are obtained by the SG vectors in six out of eight evaluation tasks whereas SVD vectors give the best performance in two tasks: RG-65, MC-30. This is an encouraging result as SVD vectors are the easiest and fastest to obtain as compared to the other two vector types.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 473, |
|
"end": 480, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "To further understand why multilingual context is highly effective for SVD vectors and to a large extent for RNN vectors as well, we plot ( Figure 4 ) the correlation ratio obtained by varying the length of word representations by using equation 6 for the three different vector types on two word similarity tasks: WS-353 and RG-65.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 148, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "SVD vectors improve performance upon the increase of the number of dimensions and tend to saturate towards the end. For all the three language pairs the SVD vectors show uniform pattern of performance which gives us the liberty to use any language pair at hand. This is not true for the RNN vectors whose curves are significantly different for every language pair. SG vectors show a uniform pattern across different language pairs and the performance with multilingual context converges to the monolingual performance when the vector length becomes equal to the monolingual case (k = 80). The fact that both SG and SVD vectors have similar behavior across language pairs can be treated as evidence that semantics or information at a conceptual level (since both of them basically model word cooccurrence counts) transfers well across languages (Dyvik, 2004) although syntax has been projected across languages as well (Hwa et al., 2005; Yarowsky and Ngai, 2001) . The pattern of results in the case of RNN vectors are indicative of the fact that these vectors encode syntactic information as explained in \u00a76 which might not generalize well as compared to semantic information.", |
|
"cite_spans": [ |
|
{ |
|
"start": 844, |
|
"end": 857, |
|
"text": "(Dyvik, 2004)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 918, |
|
"end": 936, |
|
"text": "(Hwa et al., 2005;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 937, |
|
"end": 961, |
|
"text": "Yarowsky and Ngai, 2001)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Our method of learning multilingual word vectors is most closely associated to Zou et al. (2013) who learn bilingual word embeddings and show their utility in machine translation. They optimize the monolingual and the bilingual objective together whereas we do it in two separate steps and project to a common vector space to maximize correlation between the two. Vuli\u0107 and Moens (2013) learn bilingual vector spaces from non parallel data induced using a seed lexicon. Our method can also be seen as an application of multi-view learning (Chang et al., 2013; Collobert and Weston, 2008) , where one of the views can be used to capture cross-lingual information. Klementiev et al. (2012) use a multitask learning framework to encourage the word representations learned by neural language models to agree cross-lingually. CCA can be used for dimension reduction and to draw correspondences between two sets of data. Haghighi et al. (2008) use CCA to draw translation lexicons between words of two different languages using only monolingual corpora. CCA has also been used for constructing monolingual word representations by correlating word vectors that capture aspects of word meaning and different types of distributional profile of the word (Dhillon et al., 2011) . Although our primary experimental emphasis was on LSA based monolingual word representations, which we later generalized to two different neural network based word embeddings, these monolingual word vectors can also be obtained using other continuous models of language (Collobert and Weston, 2008; Mnih and Hinton, 2008; Morin and Bengio, 2005; Huang et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 96, |
|
"text": "Zou et al. (2013)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 364, |
|
"end": 386, |
|
"text": "Vuli\u0107 and Moens (2013)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 539, |
|
"end": 559, |
|
"text": "(Chang et al., 2013;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 560, |
|
"end": 587, |
|
"text": "Collobert and Weston, 2008)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 663, |
|
"end": 687, |
|
"text": "Klementiev et al. (2012)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 915, |
|
"end": 937, |
|
"text": "Haghighi et al. (2008)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1244, |
|
"end": 1266, |
|
"text": "(Dhillon et al., 2011)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1539, |
|
"end": 1567, |
|
"text": "(Collobert and Weston, 2008;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1568, |
|
"end": 1590, |
|
"text": "Mnih and Hinton, 2008;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1591, |
|
"end": 1614, |
|
"text": "Morin and Bengio, 2005;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1615, |
|
"end": 1634, |
|
"text": "Huang et al., 2012)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Bilingual representations have previously been explored with manually designed vector space models (Peirsman and Pad\u00f3, 2010; Sumita, 2000) and with unsupervised algorithms like LDA and LSA (Boyd-Graber and Blei, 2012; Zhao and Xing, 2006) . Bilingual evidence has also been exploited for word clustering which is yet another form of representation learning, using both spectral methods (Zhao et al., 2005) and structured prediction approaches (T\u00e4ckstr\u00f6m et al., 2012; Faruqui and Dyer, 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 124, |
|
"text": "(Peirsman and Pad\u00f3, 2010;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 138, |
|
"text": "Sumita, 2000)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 217, |
|
"text": "(Boyd-Graber and Blei, 2012;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 238, |
|
"text": "Zhao and Xing, 2006)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 405, |
|
"text": "(Zhao et al., 2005)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 467, |
|
"text": "(T\u00e4ckstr\u00f6m et al., 2012;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 468, |
|
"end": 491, |
|
"text": "Faruqui and Dyer, 2013)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We have presented a canonical correlation analysis based method for incorporating multilingual context into word representations generated using only monolingual information and shown its applicability across three different ways of generating monolingual vectors on a variety of evaluation benchmarks. These word representations obtained after using multilingual evidence perform significantly better on the evaluation tasks compared to the monolingual vectors. We have also shown that our method is more suitable for vectors that encode semantic information than those that encode syntactic information. Our work suggests that multilingual evidence is an important resource even for purely monolingual, semantically aware applications. The tool for projecting word vectors can be found at http://cs.cmu. edu/\u02dcmfaruqui/soft.html.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Related approaches use the internal representations from neural network models of word sequences(Collobert and Weston, 2008) or continuous bags-of-context wordsels(Mikolov et al., 2013a) to arrive at vector representations that likewise capture cooccurence tendencies and meanings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the MATLAB module for CCA: http://www. mathworks.com/help/stats/canoncorr.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Further information on how these one-to-one translations are obtained in \u00a75", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "107 pairs were out of vocabulary for our vectors and were ignored.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.statmt.org/wmt11/ 6 http://www.statmt.org/wmt12/ 7 We also tried weighted average of vectors across all aligned words and did not observe any significant difference in results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "See section 5.5 for further discussion on vector length.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.fit.vutbr.cz/\u02dcimikolov/ rnnlm/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://code.google.com/p/word2vec/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thanks Kevin Gimpel, Noah Smith, and David Bamman for helpful comments on earlier drafts of this paper. This research was supported by the NSF through grant IIS-1352440.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "A study on similarity and relatedness using distributional and wordnet-based approaches", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of North American Chapter of the Association for Computational Linguistics, NAACL '09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A study on similarity and relatedness using distri- butional and wordnet-based approaches. In Pro- ceedings of North American Chapter of the Associ- ation for Computational Linguistics, NAACL '09, pages 19-27, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Paraphrasing with bilingual parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Bannard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Bannard and Chris Callison-Burch. 2005. Para- phrasing with bilingual parallel corpora. In Proc. of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Multilingual topic models for unaligned text", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Jordan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Boyd-Graber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Blei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jordan L. Boyd-Graber and David M. Blei. 2012. Mul- tilingual topic models for unaligned text. CoRR, abs/1205.2657.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Multi-relational latent semantic analysis", |
|
"authors": [ |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Meek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1602--1612", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai-Wei Chang, Wen-tau Yih, and Christopher Meek. 2013. Multi-relational latent semantic analysis. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1602-1612, Seattle, Washington, USA, October. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Word association norms, mutual information, and lexicography", |
|
"authors": [ |
|
{ |
|
"first": "Kenneth", |
|
"middle": [ |
|
"Ward" |
|
], |
|
"last": "Church", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Hanks", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Comput. Linguist", |
|
"volume": "16", |
|
"issue": "1", |
|
"pages": "22--29", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenneth Ward Church and Patrick Hanks. 1990. Word association norms, mutual information, and lexicog- raphy. Comput. Linguist., 16(1):22-29, March.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A unified architecture for natural language processing: deep neural networks with multitask learning", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 25th international conference on Machine learning, ICML '08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "160--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert and Jason Weston. 2008. A unified architecture for natural language processing: deep neural networks with multitask learning. In Pro- ceedings of the 25th international conference on Machine learning, ICML '08, pages 160-167, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Indexing by latent semantic analysis", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Deerwester", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Dumais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Landauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Furnas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Harshman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Journal of the American Society for Information Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. C. Deerwester, S. T. Dumais, T. K. Landauer, G. W. Furnas, and R. A. Harshman. 1990. Indexing by latent semantic analysis. Journal of the American Society for Information Science.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Multi-view learning of word embeddings via cca", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Paramveer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dean", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Dhillon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lyle", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ungar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "199--207", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paramveer S. Dhillon, Dean P. Foster, and Lyle H. Un- gar. 2011. Multi-view learning of word embeddings via cca. In NIPS, pages 199-207.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Word sense disambiguation within a multilingual framework", |
|
"authors": [ |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Talat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diab", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mona Talat Diab. 2003. Word sense disambiguation within a multilingual framework. Ph.D. thesis, Uni- versity of Maryland at College Park, College Park, MD, USA. AAI3115805.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Approximate statistical tests for comparing supervised classification learning algorithms", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dietterich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Neural Computation", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "1895--1923", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas G. Dietterich. 1998. Approximate statis- tical tests for comparing supervised classification learning algorithms. Neural Computation, 10:1895- 1923.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "cdec: A decoder, alignment, and learning framework for finite-state and context-free translation models", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Weese", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendra", |
|
"middle": [], |
|
"last": "Setiawan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ferhan", |
|
"middle": [], |
|
"last": "Ture", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Eidelman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of ACL System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Dyer, Adam Lopez, Juri Ganitkevitch, Jonathan Weese, Hendra Setiawan, Ferhan Ture, Vladimir Ei- delman, Phil Blunsom, and Philip Resnik. 2010. cdec: A decoder, alignment, and learning framework for finite-state and context-free translation models. In In Proceedings of ACL System Demonstrations.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Translations as semantic mirrors: from parallel corpus to wordnet", |
|
"authors": [ |
|
{ |
|
"first": "Helge", |
|
"middle": [], |
|
"last": "Dyvik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Language and Computers", |
|
"volume": "49", |
|
"issue": "1", |
|
"pages": "311--326", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helge Dyvik. 2004. Translations as semantic mir- rors: from parallel corpus to wordnet. Language and Computers, 49(1):311-326.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "An information theoretic approach to bilingual word clustering", |
|
"authors": [ |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manaal Faruqui and Chris Dyer. 2013. An informa- tion theoretic approach to bilingual word clustering.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "777--783", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "In Proceedings of the 51st Annual Meeting of the As- sociation for Computational Linguistics (Volume 2: Short Papers), pages 777-783, Sofia, Bulgaria, Au- gust.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Placing search in context: the concept revisited", |
|
"authors": [ |
|
{ |
|
"first": "Lev", |
|
"middle": [], |
|
"last": "Finkelstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yossi", |
|
"middle": [], |
|
"last": "Matias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Rivlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zach", |
|
"middle": [], |
|
"last": "Solan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gadi", |
|
"middle": [], |
|
"last": "Wolfman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eytan", |
|
"middle": [], |
|
"last": "Ruppin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "WWW '01: Proceedings of the 10th international conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "406--414", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev Finkelstein, Evgeniy Gabrilovich, Yossi Matias, Ehud Rivlin, Zach Solan, Gadi Wolfman, and Ey- tan Ruppin. 2001. Placing search in context: the concept revisited. In WWW '01: Proceedings of the 10th international conference on World Wide Web, pages 406-414, New York, NY, USA. ACM Press.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A synopsis of linguistic theory 1930-1955. Studies in linguistic analysis", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Firth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1957, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J.R. Firth. 1957. A synopsis of linguistic theory 1930- 1955. Studies in linguistic analysis, pages 1-32.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Matrix computations", |
|
"authors": [ |
|
{ |
|
"first": "Gene", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Golub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Van Loan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gene H. Golub and Charles F. Van Loan. 1996. Matrix computations (3rd ed.). Johns Hopkins University Press, Baltimore, MD, USA.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Learning bilingual lexicons from monolingual corpora", |
|
"authors": [ |
|
{ |
|
"first": "Aria", |
|
"middle": [], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taylor", |
|
"middle": [], |
|
"last": "Berg-Kirkpatrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aria Haghighi, Percy Liang, Taylor Berg-Kirkpatrick, and Dan Klein. 2008. Learning bilingual lexicons from monolingual corpora. In Proc. of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Distributional structure. Word", |
|
"authors": [ |
|
{ |
|
"first": "Zellig", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1954, |
|
"venue": "", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "146--162", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zellig Harris. 1954. Distributional structure. Word, 10(23):146-162.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Improving word representations via global context and multiple word prototypes", |
|
"authors": [ |
|
{

"first": "Eric",

"middle": [

"H"

],

"last": "Huang",

"suffix": ""

},

{

"first": "Richard",

"middle": [],

"last": "Socher",

"suffix": ""

},

{

"first": "Christopher",

"middle": [

"D"

],

"last": "Manning",

"suffix": ""

},

{

"first": "Andrew",

"middle": [

"Y"

],

"last": "Ng",

"suffix": ""

}
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "873--882", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric H Huang, Richard Socher, Christopher D Man- ning, and Andrew Y Ng. 2012. Improving word representations via global context and multiple word prototypes. In Proceedings of the 50th Annual Meet- ing of the Association for Computational Linguis- tics: Long Papers-Volume 1, pages 873-882. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Bootstrapping parsers via syntactic projection across parallel texts", |
|
"authors": [ |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Hwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amy", |
|
"middle": [], |
|
"last": "Weinberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Cabezas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Okan", |
|
"middle": [], |
|
"last": "Kolak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Natural Language Engineering", |
|
"volume": "11", |
|
"issue": "", |
|
"pages": "11--311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rebecca Hwa, Philip Resnik, Amy Weinberg, Clara Cabezas, and Okan Kolak. 2005. Bootstrapping parsers via syntactic projection across parallel texts. Natural Language Engineering, 11:11-311.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Inducing crosslingual distributed representations of words", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Klementiev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Binod", |
|
"middle": [], |
|
"last": "Bhattarai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Klementiev, Ivan Titov, and Binod Bhat- tarai. 2012. Inducing crosslingual distributed rep- resentations of words. In Proceedings of COLING.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1301.3781" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013a. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Linguistic regularities in continuous space word representations", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yih", |
|
"middle": [], |
|
"last": "Wen-Tau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "746--751", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Wen-tau Yih, and Geoffrey Zweig. 2013b. Linguistic regularities in continuous space word representations. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 746-751, Atlanta, Georgia, June. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Contextual correlates of semantic similarity", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Charles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "Language and Cognitive Processes", |
|
"volume": "6", |
|
"issue": "1", |
|
"pages": "1--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George A. Miller and Walter G. Charles. 1991. Con- textual correlates of semantic similarity. Language and Cognitive Processes, 6(1):1-28.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A scalable hierarchical distributed language model", |
|
"authors": [ |
|
{ |
|
"first": "Andriy", |
|
"middle": [], |
|
"last": "Mnih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andriy Mnih and Geoffrey Hinton. 2008. A scalable hierarchical distributed language model. In In NIPS.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Hierarchical probabilistic neural network language model", |
|
"authors": [ |
|
{ |
|
"first": "Frederic", |
|
"middle": [], |
|
"last": "Morin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "AISTATS05", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "246--252", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frederic Morin and Yoshua Bengio. 2005. Hierarchi- cal probabilistic neural network language model. In AISTATS05, pages 246-252.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Crosslingual induction of selectional preferences with bilingual vector spaces", |
|
"authors": [ |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Peirsman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics, HLT '10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "921--929", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yves Peirsman and Sebastian Pad\u00f3. 2010. Cross- lingual induction of selectional preferences with bilingual vector spaces. In Human Language Tech- nologies: The 2010 Annual Conference of the North American Chapter of the Association for Com- putational Linguistics, HLT '10, pages 921-929, Stroudsburg, PA, USA. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "A word at a time: computing word relatedness using temporal semantic analysis", |
|
"authors": [ |
|
{ |
|
"first": "Kira", |
|
"middle": [], |
|
"last": "Radinsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Agichtein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaul", |
|
"middle": [], |
|
"last": "Markovitch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 20th international conference on World wide web, WWW '11", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "337--346", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kira Radinsky, Eugene Agichtein, Evgeniy Gabrilovich, and Shaul Markovitch. 2011. A word at a time: computing word relatedness using temporal semantic analysis. In Proceedings of the 20th international conference on World wide web, WWW '11, pages 337-346, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Distinguishing systems and distinguishing senses: new evaluation methods for word sense disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Nat. Lang. Eng", |
|
"volume": "5", |
|
"issue": "2", |
|
"pages": "113--133", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philip Resnik and David Yarowsky. 1999. Distinguish- ing systems and distinguishing senses: new evalua- tion methods for word sense disambiguation. Nat. Lang. Eng., 5(2):113-133, June.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Contextual correlates of synonymy", |
|
"authors": [ |
|
{ |
|
"first": "Herbert", |
|
"middle": [], |
|
"last": "Rubenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Goodenough", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1965, |
|
"venue": "Commun. ACM", |
|
"volume": "8", |
|
"issue": "10", |
|
"pages": "627--633", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Herbert Rubenstein and John B. Goodenough. 1965. Contextual correlates of synonymy. Commun. ACM, 8(10):627-633, October.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Lexical transfer using a vectorspace model", |
|
"authors": [ |
|
{ |
|
"first": "Eiichiro", |
|
"middle": [], |
|
"last": "Sumita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 38th Annual Meeting on Association for Computational Linguistics, ACL '00", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "425--431", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eiichiro Sumita. 2000. Lexical transfer using a vector- space model. In Proceedings of the 38th Annual Meeting on Association for Computational Linguis- tics, ACL '00, pages 425-431, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Cross-lingual word clusters for direct transfer of linguistic structure", |
|
"authors": [ |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "The 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oscar T\u00e4ckstr\u00f6m, Ryan McDonald, and Jakob Uszko- reit. 2012. Cross-lingual word clusters for direct transfer of linguistic structure. In The 2012 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, volume 1, page 11. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Word representations: a simple and general method for semi-supervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Turian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lev", |
|
"middle": [], |
|
"last": "Ratinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, ACL '10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "384--394", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph Turian, Lev Ratinov, and Yoshua Bengio. 2010. Word representations: a simple and general method for semi-supervised learning. In Proceedings of the 48th Annual Meeting of the Association for Com- putational Linguistics, ACL '10, pages 384-394, Stroudsburg, PA, USA. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "From frequency to meaning : Vector space models of semantics", |
|
"authors": [ |
|
{

"first": "Peter",

"middle": [

"D"

],

"last": "Turney",

"suffix": ""

},

{

"first": "Patrick",

"middle": [],

"last": "Pantel",

"suffix": ""

}
|
], |
|
"year": 2010, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "141--188", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D. Turney and Patrick Pantel. 2010. From fre- quency to meaning : Vector space models of se- mantics. Journal of Artificial Intelligence Research, pages 141-188.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Mining the web for synonyms: Pmi-ir versus lsa on toefl", |
|
"authors": [ |
|
{

"first": "Peter",

"middle": [

"D"

],

"last": "Turney",

"suffix": ""

}
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 12th European Conference on Machine Learning, EMCL '01", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "491--502", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D. Turney. 2001. Mining the web for synonyms: Pmi-ir versus lsa on toefl. In Proceedings of the 12th European Conference on Machine Learning, EMCL '01, pages 491-502, London, UK, UK. Springer- Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Similarity of semantic relations", |
|
"authors": [ |
|
{

"first": "Peter",

"middle": [

"D"

],

"last": "Turney",

"suffix": ""

}
|
], |
|
"year": 2006, |
|
"venue": "Comput. Linguist", |
|
"volume": "32", |
|
"issue": "3", |
|
"pages": "379--416", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D. Turney. 2006. Similarity of semantic rela- tions. Comput. Linguist., 32(3):379-416, Septem- ber.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Visualizing Data using t-SNE", |
|
"authors": [ |
|
{ |
|
"first": "Laurens", |
|
"middle": [], |
|
"last": "Van Der Maaten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "2579--2605", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laurens van der Maaten and Geoffrey Hinton. 2008. Visualizing Data using t-SNE. Journal of Machine Learning Research, 9:2579-2605, November.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "A study on bootstrapping bilingual vector spaces from nonparallel data (and nothing else)", |
|
"authors": [ |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Francine", |
|
"middle": [], |
|
"last": "Moens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1613--1624", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ivan Vuli\u0107 and Marie-Francine Moens. 2013. A study on bootstrapping bilingual vector spaces from non- parallel data (and nothing else). In Proceedings of the 2013 Conference on Empirical Methods in Natu- ral Language Processing, pages 1613-1624, Seattle, Washington, USA, October. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Inducing multilingual pos taggers and np bracketers via robust projection across aligned corpora", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grace", |
|
"middle": [], |
|
"last": "Ngai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the second meeting of the North American Chapter of the Association for Computational Linguistics on Language technologies, NAACL '01", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Yarowsky and Grace Ngai. 2001. Inducing mul- tilingual pos taggers and np bracketers via robust projection across aligned corpora. In Proceedings of the second meeting of the North American Chap- ter of the Association for Computational Linguistics on Language technologies, NAACL '01, pages 1- 8, Stroudsburg, PA, USA. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Bitam: Bilingual topic admixture models for word alignment", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 44th Annual Meeting of the Association for Computational Linguistics (ACL06", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bing Zhao and Eric P. Xing. 2006. Bitam: Bilingual topic admixture models for word alignment. In In Proceedings of the 44th Annual Meeting of the As- sociation for Computational Linguistics (ACL06.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Bilingual word spectral clustering for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Xing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the ACL Workshop on Building and Using Parallel Texts, ParaText '05", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bing Zhao, Eric P. Xing, and Alex Waibel. 2005. Bilingual word spectral clustering for statistical ma- chine translation. In Proceedings of the ACL Work- shop on Building and Using Parallel Texts, ParaText '05, pages 25-32, Stroudsburg, PA, USA. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Bilingual word embeddings for phrase-based machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Will", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1393--1398", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Will Y. Zou, Richard Socher, Daniel Cer, and Christo- pher D. Manning. 2013. Bilingual word embed- dings for phrase-based machine translation. In Pro- ceedings of the 2013 Conference on Empirical Meth- ods in Natural Language Processing, pages 1393- 1398, Seattle, Washington, USA, October. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Cross-lingual word vector projection using CCA.", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Monolingual (top) and multilingual (bottom; marked with apostrophe) word projections of the antonyms (shown in red) and synonyms of \"beautiful\".", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Performance of monolingual and multilingual vectors on WS-353 for different vector lengths.", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Performance as a function of vector length on word similarity tasks. The monolingual vectors always have a fixed length of 80, they are just shown in the plots for comparison.", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"text": "Spearman's correlation (left) and accuracy (right) on different tasks.", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Lang</td><td>Dim</td><td colspan=\"6\">WS-353 WS-SIM WS-REL RG-65 MC-30 MTurk-287</td><td colspan=\"2\">SEM-REL SYN-REL</td></tr><tr><td>En</td><td>640</td><td>46.7</td><td>56.2</td><td>36.5</td><td>50.7</td><td>42.3</td><td>51.2</td><td>14.5</td><td>36.8</td></tr><tr><td>De-En</td><td>512</td><td>68.0</td><td>74.4</td><td>64.6</td><td>75.5</td><td>81.9</td><td>53.6</td><td>43.9</td><td>45.5</td></tr><tr><td>Fr-En</td><td>512</td><td>68.4</td><td>73.3</td><td>65.7</td><td>73.5</td><td>81.3</td><td>55.5</td><td>43.9</td><td>44.3</td></tr><tr><td>Es-En</td><td>512</td><td>67.2</td><td>71.6</td><td>64.5</td><td>70.5</td><td>78.2</td><td>53.6</td><td>44.2</td><td>44.5</td></tr><tr><td>Average</td><td>-</td><td>56.6</td><td>64.5</td><td>51.0</td><td>62.0</td><td>65.5</td><td>60.8</td><td>44</td><td>44.7</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"text": "Spearman's correlation (left) and accuracy (right) on different tasks. Bold indicates best result across all vector types. Mono: monolingual and Multi: multilingual.", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>WS-353</td><td>RG-65</td></tr><tr><td/><td>SVD</td></tr><tr><td>Correlation ratio (%)</td><td>RNN</td></tr><tr><td/><td>SG</td></tr><tr><td/><td>Number of dimensions</td></tr></table>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |