|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:40:03.068503Z" |
|
}, |
|
"title": "Denoising Word Embeddings by Averaging in a Shared Space", |
|
"authors": [ |
|
{ |
|
"first": "Avi", |
|
"middle": [], |
|
"last": "Caciularu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Bar-Ilan University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Bar-Ilan University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Goldberger", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Bar-Ilan University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We introduce a new approach for smoothing and improving the quality of word embeddings. We consider a method of fusing word embeddings that were trained on the same corpus but with different initializations. We project all the models to a shared vector space using an efficient implementation of the Generalized Procrustes Analysis (GPA) procedure, previously used in multilingual word translation. Our word representation demonstrates consistent improvements over the raw models as well as their simplistic average, on a range of tasks. As the new representations are more stable and reliable, there is a noticeable improvement in rare word evaluations.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We introduce a new approach for smoothing and improving the quality of word embeddings. We consider a method of fusing word embeddings that were trained on the same corpus but with different initializations. We project all the models to a shared vector space using an efficient implementation of the Generalized Procrustes Analysis (GPA) procedure, previously used in multilingual word translation. Our word representation demonstrates consistent improvements over the raw models as well as their simplistic average, on a range of tasks. As the new representations are more stable and reliable, there is a noticeable improvement in rare word evaluations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Continuous (non-contextualized) word embeddings have been introduced several years ago as a standard building block for NLP tasks. These models provide efficient ways to learn word representations in a fully self-supervised manner from text corpora, solely based on word co-occurrence statistics. A wide variety of methods now exist for generating word embeddings, with prominent methods including word2vec (Mikolov et al., 2013a) , GloVe (Pennington et al., 2014) , and FastText (Bojanowski et al., 2017) . Recently, contextualized embeddings (Peters et al., 2018; Devlin et al., 2019) , replaced the use of non-contextualized embeddings in many settings. Yet, the latter remain the standard choice for typical lexical-semantic tasks, e.g., semantic similarity (Hill et al., 2015) , word analogy (Jurgens et al., 2012) , relation classification (Barkan et al., 2020a) , and paraphrase identification (Meged et al., 2020) . These tasks consider the generic meanings of lexical items, given out of context, hence the use of non-contextualized embeddings is appropriate. Notably, FastText was shown to yield state-of-theart results in most of these tasks (Bojanowski et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 407, |
|
"end": 430, |
|
"text": "(Mikolov et al., 2013a)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 464, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 505, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 565, |
|
"text": "(Peters et al., 2018;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 566, |
|
"end": 586, |
|
"text": "Devlin et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 762, |
|
"end": 781, |
|
"text": "(Hill et al., 2015)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 797, |
|
"end": 819, |
|
"text": "(Jurgens et al., 2012)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 846, |
|
"end": 868, |
|
"text": "(Barkan et al., 2020a)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 901, |
|
"end": 921, |
|
"text": "(Meged et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1153, |
|
"end": 1178, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "While word embedding methods proved to be powerful, they suffer from a certain level of noise, introduced by quite a few randomized steps in the embedding generation process, including embedding initialization, negative sampling, subsampling and mini-batch ordering. Consequently, different runs would yield different embedding geometries, of varying quality. This random noise might harm most severely the representation of rare words, for which the actual data signal is rather weak (Barkan et al., 2020b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 485, |
|
"end": 507, |
|
"text": "(Barkan et al., 2020b)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose denoising word embedding models through generating multiple model versions, each created with different random seeds. Then, the resulting representations for each word should be fused effectively, in order to obtain a model with a reduced level of noise. Note, however, that simple averaging of the original word vectors is problematic, since each training session of the algorithm produces embeddings in a different space. In fact, the objective scores of both word2vec, Glove and FastText are invariant to multiplying all the word embeddings by an orthogonal matrix, hence, the algorithm output involves an arbitrary rotation of the embedding space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
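
{

"text": "As a minimal illustration of this invariance (a NumPy sketch added for clarity, not part of the training algorithms themselves), rotating all word vectors by a random orthogonal matrix preserves every inner product, so two such models fit the data equally well:\nimport numpy as np\n\nrng = np.random.default_rng(0)\nX = rng.normal(size=(1000, 200))  # toy embedding matrix, one row per word\nQ, _ = np.linalg.qr(rng.normal(size=(200, 200)))  # random orthogonal matrix\nX_rot = X @ Q.T  # rotate every word vector\n# all pairwise inner products, and hence the training objective, are unchanged\nprint(np.allclose(X @ X.T, X_rot @ X_rot.T))  # True",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},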
|
{ |
|
"text": "For addressing this issue, we were inspired by recent approaches originally proposed for aligning multi-lingual embeddings (Chen and Cardie, 2018; Kementchedjhieva et al., 2018; Alaux et al., 2019; Jawanpuria et al., 2019; Taitelbaum et al., 2019) . To obtain such alignments, these methods simultaneously project the original language-specific embeddings into a shared space, while enforcing (or at least encouraging) transitive orthogonal transformations. In our (monolingual) setting, we propose a related technique to project the different embedding versions into a shared space, while optimizing the projection towards obtaining an improved fused representation. We show that this results in im-proved performance on a range of lexical-semantic tasks, with notable improvements for rare words, as well as on several sentence-level downstream tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 146, |
|
"text": "(Chen and Cardie, 2018;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 177, |
|
"text": "Kementchedjhieva et al., 2018;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 197, |
|
"text": "Alaux et al., 2019;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 222, |
|
"text": "Jawanpuria et al., 2019;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 247, |
|
"text": "Taitelbaum et al., 2019)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Assume we are given an ensemble of k pre-trained word embedding sets, of the same word vocabulary of size n and the same dimensionality d. In our setting, these sets are obtained by training the same embedding model using different random parameter initializations. Our goal is to fuse the k embedding sets into a single \"average\" embedding that is hopefully more robust and would yield better performance on various tasks. Since each embedding set has its own space, we project the k embedding spaces into a shared space, in which we induce averaged embeddings based on a mean squared error minimization objective.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Let x i,t \u2208 R d be the dense representation of the t-th word in the i-th embedding set. We model the mapping from the i-th set to the shared space by an orthogonal matrix denoted by T i . Denote the sought shared space representation of the t-th word by y t \u2208 R d . Our goal is to find a set of transformations T = {T 1 , ..., T k } and target word embeddings y = {y 1 , ..., y n } in the shared space that minimize the following mean-squared error:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "S(T, y) = k i=1 n t=1 T i x i,t \u2212 y t 2 .", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For this objective, it is easy to show that for a set of transformations T 1 , ..., T k , the optimal shared space representation is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "y t = 1 k k i=1 T i x i,t .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
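
{

"text": "To verify this, fix the transformations and minimize over each $y_t$ separately: the per-word objective $\\sum_{i=1}^{k} \\|T_i x_{i,t} - y_t\\|^2$ is quadratic in $y_t$, and setting its gradient to zero gives $\\sum_{i=1}^{k} (y_t - T_i x_{i,t}) = 0$, whose unique solution is the mean above.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Averaging in a Shared Space",

"sec_num": "2"

},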
|
{ |
|
"text": "Hence, solving the optimization problem pertains to finding the k optimal transformations. In the case where k = 2, the optimal T can be obtained in a closed form using the Procrustes Analysis (PA) procedure (Sch\u00f6nemann, 1966) , which has been employed in recent bilingual word translation methods (Xing et al., 2015; Artetxe et al., 2016; Hamilton et al., 2016; Artetxe et al., 2017a,b; Conneau et al., 2017; Artetxe et al., 2018a,b; . In our setting, to obtain an improved embedding, we wish to average more than two embedding sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 226, |
|
"text": "(Sch\u00f6nemann, 1966)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 317, |
|
"text": "(Xing et al., 2015;", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 318, |
|
"end": 339, |
|
"text": "Artetxe et al., 2016;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 362, |
|
"text": "Hamilton et al., 2016;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 387, |
|
"text": "Artetxe et al., 2017a,b;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 388, |
|
"end": 409, |
|
"text": "Conneau et al., 2017;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 434, |
|
"text": "Artetxe et al., 2018a,b;", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
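
{

"text": "For concreteness, the closed-form PA solution can be sketched in a few lines of NumPy (an illustrative implementation under our notation, with the rows of X and Y holding the word vectors of the two sets):\nimport numpy as np\n\ndef procrustes(X, Y):\n    # orthogonal T minimizing sum_t ||T x_t - y_t||^2\n    M = Y.T @ X  # the d x d matrix sum_t y_t x_t^T\n    U, _, Vt = np.linalg.svd(M)\n    return U @ Vt  # rows of X are then aligned to Y via X @ T.T",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Averaging in a Shared Space",

"sec_num": "2"

},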
|
{ |
|
"text": "However, if k > 2 there is no closed form solution to (1) and thus, we need to find a solution using an iterative optimization process. To that end, we follow several works that suggested employing the General Procrustes Analysis (GPA) procedure, which is an extension of PA to multi-set alignment (Gower, 1975; Kementchedjhieva et al., 2018) . Generally, the GPA consists of an alternate minimization procedure where we iterate between finding the orthogonal transformations and computing the shared space. The optimal transformation from each embedding space to the shared space is found by minimizing the following score,", |
|
"cite_spans": [ |
|
{ |
|
"start": 298, |
|
"end": 311, |
|
"text": "(Gower, 1975;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 342, |
|
"text": "Kementchedjhieva et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "S(T i ) = n t=1 T i x i,t \u2212 y t 2 , i = 1, ..., k.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The minimum of S(T i ) can then be found by the closed form PA procedure. The updated transformation is", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "T i = U i V i , where U i \u03a3 i V i is the singular value decomposition (SVD) of the d \u00d7 d matrix n t=1 y t x i,t", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": ". At each step in the iterative GPA algorithm, the score (1) is monotonically decreased until it converges to a local minimum point.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Algorithm 1 Shared Space Embedding Averaging 1: Input: Ensemble of k word embedding sets. 2: Task: Find the optimal average embedding. 3: Preprocessing: 4: Compute the cross-correlation matrices: 5:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "C ij = C ji = n t=1 x j,t x i,t 1 \u2264 i < j \u2264 k 6: Initialization: T 1 = \u2022 \u2022 \u2022 = T k\u22121 = 0, T k = I 7: while not converged do 8: for i = 1, ..., k do 9: U \u03a3V = SVD j =i T j C ij 10: T i \u2190 U V 11:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "end for 12: end while 13: Compute the average embedding:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "14: y t \u2190 1 k k i=1 T i x i,t t = 1, ..., n", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For large vocabularies, GPA is not efficient, because, in each iteration, when computing the SVD we need to sum over all the vocabulary words. To circumvent this computational cost, we adopt the optimization procedure from Taitelbaum et al. 2019, which we apply within each iteration. Instead of summing over the whole vocabulary, the following extension is proposed. Let C ij = t x j,t x i,t be the cross-correlation matrix original denoised word2vec 0.40 \u00b1 0.005 0.059 \u00b1 0.003 GloVe 0.38 \u00b1 0.006 0.058 \u00b1 0.003 FastText 0.35 \u00b1 0.003 0.054 \u00b1 0.001 Table 1 : Average MSE scores of the embedding models with and without applying the SSEA algorithm.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 548, |
|
"end": 555, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "for a pair (i, j) of two original embedding spaces, which can be computed once, for all pairs of spaces, in a pre-processing step. Given the matrices C ij the computational complexity of the iterative averaging algorithm is independent of the vocabulary size, allowing us to compute efficiently the SVD. The resulting algorithm termed Shared Space Embedding Averaging (SSEA) is presented in Algorithm 1. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Averaging in a Shared Space", |
|
"sec_num": "2" |
|
}, |
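
{

"text": "A compact NumPy sketch of Algorithm 1 follows (an illustrative implementation added for clarity; the released demonstration code uses an efficient PyTorch variant, and the convergence test is replaced here by a fixed iteration budget):\nimport numpy as np\n\ndef ssea(embeddings, n_iter=50):\n    # embeddings: list of k arrays of shape (n, d), rows aligned by vocabulary\n    k, d = len(embeddings), embeddings[0].shape[1]\n    # preprocessing: C_ij = sum_t x_{j,t} x_{i,t}^T, computed once per pair\n    C = {(i, j): embeddings[j].T @ embeddings[i]\n         for i in range(k) for j in range(k) if i != j}\n    T = [np.zeros((d, d)) for _ in range(k - 1)] + [np.eye(d)]\n    for _ in range(n_iter):  # alternating minimization, independent of vocabulary size\n        for i in range(k):\n            M = sum(T[j] @ C[(i, j)] for j in range(k) if j != i)\n            U, _, Vt = np.linalg.svd(M)\n            T[i] = U @ Vt\n    # average in the shared space: y_t = (1/k) sum_i T_i x_{i,t}\n    return sum(X @ Ti.T for Ti, X in zip(T, embeddings)) / k",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Averaging in a Shared Space",

"sec_num": "2"

},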
|
{ |
|
"text": "This section presents our evaluation protocol, datasets, data preparation, hyperparameter configuration and results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We trained word2vec (Mikolov et al., 2013a) , Fast-Text (Bojanowski et al., 2017) and GloVe (Pennington et al., 2014) embeddings. For word2vec we used the skip-gram model with negative sampling, which was shown advantageous on the evaluated tasks (Levy et al., 2015) . We trained each of the models on the November 2019 dump of Wikipedia articles 2 for k = 30 times, with different random seeds, and used the default reported hyperparameters; we set the embedding dimension to d = 200, and considered each word within the maximal window c max = 5, subsampling 3 threshold of \u03c1 = 10 \u22125 and used 5 negative examples for every positive example. In order to keep a large amount of rare words in the corpus, no preprocessing was applied on the data, yielding a vocabulary size of 1.5 \u2022 10 6 . We then applied the SSEA algorithm to the embedding sets to obtain the average embedding. The original embedding sets and averaged embeddings were centered around the 0 vector and normalized to unit vectors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 43, |
|
"text": "(Mikolov et al., 2013a)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 56, |
|
"end": 81, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 266, |
|
"text": "(Levy et al., 2015)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details and Data", |
|
"sec_num": "3.1" |
|
}, |
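
{

"text": "As an illustrative sketch of this training setup (assuming the gensim library; the exact training code is not specified here), the k embedding versions differ only in their random seed:\nfrom gensim.models import Word2Vec\n\ndef train_versions(sentences, k=30):\n    # one skip-gram negative-sampling model per random seed (gensim 4.x API)\n    models = [Word2Vec(sentences=sentences, vector_size=200, window=5, sg=1,\n                       negative=5, sample=1e-5, min_count=1, seed=seed,\n                       workers=1)  # a single worker aids reproducibility\n              for seed in range(k)]\n    return [m.wv.vectors for m in models]  # each is an (n, d) array",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Implementation Details and Data",

"sec_num": "3.1"

},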
|
{ |
|
"text": "We next analyze how our method improves embedding quality and consistency, notably for rare 1 The algorithm demonstration code is available at github.com/aviclu/SSEA. In practice, we utilized an efficient PyTorch implementation based on Taitelbaum et al. (2019 words. To that end, for any two embedding sets, u and v, we can find the optimal mapping Q between them using the PA algorithm and compute its mean square error", |
|
"cite_spans": [ |
|
{ |
|
"start": 237, |
|
"end": 260, |
|
"text": "Taitelbaum et al. (2019", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Improved Embedding Stability", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(MSE), 1 n t=1 Qu t \u2212 v t 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Improved Embedding Stability", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We define the stability of an embedding algorithm by the average MSE (over 10 random pairs of samples) between two instances of it. This score measures the similarity between the geometries of random instances generated by a particular embedding method , and thus reflects the consistency and stability of that method. The scores of the different models are depicted in Table 1 . As observed, after applying SSEA the Average MSE drops by an order of magnitude, indicating much better stability of the obtained embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 370, |
|
"end": 377, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Improved Embedding Stability", |
|
"sec_num": "3.2" |
|
}, |
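
{

"text": "This stability score is straightforward to compute (an illustrative sketch reusing the procrustes function above):\nimport numpy as np\n\ndef pairwise_mse(U, V):\n    # align U to V with PA, then average the per-word squared residual\n    Q = procrustes(U, V)  # closed-form PA, as sketched in Section 2\n    return np.mean(np.sum((U @ Q.T - V) ** 2, axis=1))\n\ndef stability(sampled_pairs):\n    # average MSE over random pairs (U, V) of embedding instances\n    return np.mean([pairwise_mse(U, V) for U, V in sampled_pairs])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Improved Embedding Stability",

"sec_num": "3.2"

},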
|
{ |
|
"text": "We can perform a similar analysis for each word separately. A consistent embedding of the t-th word in both sets u and v should result in a small mapping discrepancy Qu t \u2212v t 2 . Figure 1 depicts MSE for the models and their computed SSEA, as a function of the word's frequency in the corpus. The denoised version of the models is marked with a 'D-' prefix. For clarity of presentation, we did not include the results for GloVe (which are similar to word2vec). As expected, embedding stability always increases (MSE decreases) with word frequency. SSEA is notably more stable across the frequency range, with the error minimized early on and reduced most drastically for low frequencies.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 188, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Improved Embedding Stability", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We next compare our denoised model, denoted with a 'D-' prefix, with the original embedding models. As an additional baseline, we considered also the na\u00efve averaged embedding model, denoted with a 'A-' prefix, where for every word we computed the simplistic mean embedding across all origi-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison of methods", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "SimLex999 nal spaces. Note that we did not compare other proposed embeddings or meta-embedding learning methods, but rather restricted our analysis to empirically verifying our embedding aggregation method and validating the assumptions behind the empirical analysis we performed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": null |
|
}, |
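
{

"text": "For concreteness, the na\u00efve 'A-' baseline is simply the unaligned per-word mean (an illustrative sketch):\ndef naive_average(embeddings):\n    # per-word mean across the k original spaces, with no alignment step\n    # (each element is an (n, d) NumPy array over the same vocabulary)\n    return sum(embeddings) / len(embeddings)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Comparison of methods",

"sec_num": "3.3"

},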
|
{ |
|
"text": "We evaluated the performance of our method over lexical-semantic tasks, including word similarity, analogy solving, and concept categorization: Sim-Lex999 (Hill et al., 2015) , MEN (Bruni et al., 2014) , WS353 (Finkelstein et al., 2002) , AP (Almuhareb and Poesio, 2004), Google (Mikolov et al., 2013b) , MSR (Mikolov et al., 2013c) , SemEval-2012 (Jurgens et al., 2012 , BLESS (Baroni and Lenci, 2011) and RW (Luong et al., 2013) , (focusing on rare words). For the analogy task, we reported the accuracy. For the remaining tasks, we computed Spearman's correlation between the cosine similarity of the embeddings and the human judgments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 174, |
|
"text": "(Hill et al., 2015)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 201, |
|
"text": "(Bruni et al., 2014)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 236, |
|
"text": "(Finkelstein et al., 2002)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 302, |
|
"text": "(Mikolov et al., 2013b)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 332, |
|
"text": "(Mikolov et al., 2013c)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 347, |
|
"text": "SemEval-2012", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 369, |
|
"text": "(Jurgens et al., 2012", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 430, |
|
"text": "(Luong et al., 2013)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluations on Lexical Semantic Tasks", |
|
"sec_num": "3.4" |
|
}, |
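
{

"text": "The word-similarity protocol can be sketched as follows (illustrative code; benchmark loading is omitted, and vocab is assumed to map each word to its row index):\nimport numpy as np\nfrom scipy.stats import spearmanr\n\ndef similarity_score(emb, vocab, pairs):\n    # pairs: (word1, word2, human_rating) triples from a benchmark\n    sims, gold = [], []\n    for w1, w2, rating in pairs:\n        if w1 in vocab and w2 in vocab:\n            u, v = emb[vocab[w1]], emb[vocab[w2]]\n            sims.append(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)))\n            gold.append(rating)\n    return spearmanr(sims, gold).correlation",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluations on Lexical Semantic Tasks",

"sec_num": "3.4"

},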
|
{ |
|
"text": "Results The results of the lexical-semantic tasks are depicted in Table 2 , averaged over 30 runs for each method. Our method obtained better performance than the other methods, substantially for FastText embeddings. As shown, the na\u00efve averaging performed poorly, which highlights the fact that simply averaging different embedding spaces does not improve word representation quality. The most notable performance gain was in the rare-words task, in line with the analysis in Fig. 1 , suggesting that on rare words the raw embedding vectors fit the data less accurately.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 73, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 483, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluations on Lexical Semantic Tasks", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "For completeness, we next show the relative advantage of our denoising method also when applied to several sentence-level downstream benchmarks. While contextualized embeddings domi-nate a wide range of sentence-and document-level NLP tasks (Peters et al., 2018; Devlin et al., 2019; Caciularu et al., 2021) , we assessed the relative advantage of our denoising method when utilizing (non-contextualized) word embeddings in sentencean document-level settings. We applied the exact procedure proposed in Li et al. (2017) and Rogers et al. (2018) , as an effective benchmark for the quality of static embedding models. We first used sequence labeling tasks. The morphological and syntactic performance was evaluated using part-of-speech tagging, POS, and chunking, CHK. Both named entity recognition, NER, and multiway classification of semantic relation classes, RE, tasks were used for evaluating semantic information at the word level. For the above POS, NER and CHK sequence labeling tasks, we used the CoNLL 2003 dataset (Sang and Meulder, 2003) and for the RE task, we used the SemEval 2010 task 8 dataset (Hendrickx et al., 2010) . The neural network models employed for these downstream tasks are fully described in (Rogers et al., 2018) . Next, we evaluated the following semantic level tasks: document-level polarity classification, PC, using the Stanford IMDB movie review dataset (Maas et al., 2011) , sentence level sentiment polarity classification, SEN, using the MR dataset of short movie reviews (Pang and Lee, 2005) , and classification of subjectivity and objectivity task, SUB, that uses the Rotten Tomatoes user review snippets against official movie plot summaries (Pang and Lee, 2004) . Similarly to the performance results in Table 2 , the current results show that the suggested denoised embeddings obtained better overall performance than the other methods, substantially for FastText embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 262, |
|
"text": "(Peters et al., 2018;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 263, |
|
"end": 283, |
|
"text": "Devlin et al., 2019;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 307, |
|
"text": "Caciularu et al., 2021)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 519, |
|
"text": "Li et al. (2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 524, |
|
"end": 544, |
|
"text": "Rogers et al. (2018)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 1005, |
|
"end": 1033, |
|
"text": "CoNLL 2003 dataset (Sang and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1034, |
|
"end": 1048, |
|
"text": "Meulder, 2003)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 1110, |
|
"end": 1134, |
|
"text": "(Hendrickx et al., 2010)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1222, |
|
"end": 1243, |
|
"text": "(Rogers et al., 2018)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 1390, |
|
"end": 1409, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1511, |
|
"end": 1531, |
|
"text": "(Pang and Lee, 2005)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 1685, |
|
"end": 1705, |
|
"text": "(Pang and Lee, 2004)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1748, |
|
"end": 1755, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluations On Downstream Tasks", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "A similar situation of aligning different word embeddings into a shared space occurs in multi-lingual word translation tasks which are based on distinct monolingual word embeddings. Word translation is performed by transforming each language word embeddings into a shared space by an orthogonal matrix, for creating a \"universal language\", which is useful for the word translation process. Our setting may be considered by viewing each embedding set as a different language, where our goal is to find the shared space where embedding averaging is meaningful.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The main challenge in multilingual word translation is to obtain a reliable multi-way word correspondence in either a supervised or unsupervised manner. One problem is that standard dictionaries contain multiple senses for words, which is problematic for bilingual translation, and further amplified in a multilingual setting. In our case of embedding averaging, the mapping problem vanishes since we are addressing a single language and the word correspondences hold trivially among different embeddings of the same word. Thus, in our setting, there are no problems of wrong word correspondences, neither the issue of having different word translations due to multiple word senses. Studies have shown that for the multi-lingual translation problem, enforcing the transformation to be strictly orthogonal is too restrictive and performance can be improved by using the orthogonalization as a regularization (Chen and Cardie, 2018) that yields matrices that are close to be orthogonal. In our much simpler setting of a single language, with a trivial identity word correspondence, enforcing the orthogonalization constraint is reasonable.", |
|
"cite_spans": [ |
|
{ |
|
"start": 907, |
|
"end": 930, |
|
"text": "(Chen and Cardie, 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Another related problem is meta-embedding (Yin and Sch\u00fctze, 2016) , which aims to fuse information from different embedding models. Various methods have been proposed for embedding fusion, such as concatenation, simple averaging, weighted averaging (Coates and Bollegala, 2018; Kiela et al., 2018) and autoencoding (Bollegala and Bao, 2018) . Some of these methods (concatenation and autoencoding) are not scalable when the goal is to fuse many sets, while others (simple averaging) yield inferior results, as described in the above works. Note that our method is not intended to be a competitor of meta-embedding, but rather a complementary method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 65, |
|
"text": "(Yin and Sch\u00fctze, 2016)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 277, |
|
"text": "(Coates and Bollegala, 2018;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 297, |
|
"text": "Kiela et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 315, |
|
"end": 340, |
|
"text": "(Bollegala and Bao, 2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "An additional related work is the recent method from (Murom\u00e4gi et al., 2017) . Similarly to our work, they proposed a method based on the Procrustes Analysis procedure for aligning and averaging sets of word embedding models. However, the mapping algorithm they used is much more computationally demanding, as it requires to go over all the dictionary words in every iteration. Instead, we propose an efficient optimization algorithm, which requires just one such computation during each iteration, and is theoretically guaranteed to converge to a local minimum point. While their work focuses on improving over the Estonian language, we suggest evaluating this approach on English data and on a range of different downstream tasks. We show that our method significantly improves upon rare words, which is beneficial for small sized / domain-specific corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 76, |
|
"text": "(Murom\u00e4gi et al., 2017)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We presented a novel technique for creating better word representations by training an embedding model several times, from which we derive an averaged representation. The resulting word representations proved to be more stable and reliable than the raw embeddings. Our method exhibits performance gains in lexical-semantic tasks, notably over rare words, confirming our analytical assumptions. This suggests that our method may be particularly useful for training embedding models in low-resource settings. Appealing future research may extend our approach to improving sentence-level representations, by fusing several contextualized embedding models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The authors would like to thank the anonymous reviewers for their comments and suggestions. The work described herein was supported in part by grants from Intel Labs, Facebook, the Israel Science Foundation grant 1951/17 and the German Research Foundation through the German-Israeli Project Cooperation (DIP, grant DA 1600/1-1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Unsupervised hyperalignment for multilingual word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Alaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Cuturi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jean Alaux, Edouard Grave, Marco Cuturi, and Ar- mand Joulin. 2019. Unsupervised hyperalignment for multilingual word embeddings. In International Conference on Learning Representations (ICLR).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Attribute-based and value-based clustering: An evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Abdulrahman", |
|
"middle": [], |
|
"last": "Almuhareb", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Poesio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdulrahman Almuhareb and Massimo Poesio. 2004. Attribute-based and value-based clustering: An eval- uation. In Proceedings of the conference on em- pirical methods in natural language processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning principled bilingual mappings of word embeddings while preserving monolingual invariance", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2016. Learning principled bilingual mappings of word em- beddings while preserving monolingual invariance. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Learning bilingual word embeddings with (almost) no bilingual data", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2017a. Learning bilingual word embeddings with (almost) no bilingual data. In Annual Meeting of the Association for Computational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Generalizing and improving bilingual word embedding mappings with a multi-step framework of linear transformations", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018a. Generalizing and improving bilingual word embedding mappings with a multi-step framework of linear transformations. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.06297" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018b. A robust self-learning method for fully un- supervised cross-lingual mappings of word embed- dings. arXiv preprint arXiv:1805.06297.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Unsupervised neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1710.11041" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2017b. Unsupervised neural ma- chine translation. arXiv preprint arXiv:1710.11041.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Within-between lexical relation classification", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Barkan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avi", |
|
"middle": [], |
|
"last": "Caciularu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3521--3527", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.284" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Barkan, Avi Caciularu, and Ido Dagan. 2020a. Within-between lexical relation classification. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3521-3527, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Bayesian hierarchical words representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Barkan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Idan", |
|
"middle": [], |
|
"last": "Rejwan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avi", |
|
"middle": [], |
|
"last": "Caciularu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Koenigstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3871--3877", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.356" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Barkan, Idan Rejwan, Avi Caciularu, and Noam Koenigstein. 2020b. Bayesian hierarchical words representation learning. In Proceedings of the 58th Annual Meeting of the Association for Computa- tional Linguistics, pages 3871-3877, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "How we blessed distributional semantic evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the GEMS 2011 Workshop on GEometrical Models of Natural Language Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Baroni and Alessandro Lenci. 2011. How we blessed distributional semantic evaluation. In Pro- ceedings of the GEMS 2011 Workshop on GEometri- cal Models of Natural Language Semantics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics (TACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics (TACL).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Learning word meta-embeddings by autoencoding", |
|
"authors": [ |
|
{ |
|
"first": "Danushka", |
|
"middle": [], |
|
"last": "Bollegala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cong", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danushka Bollegala and Cong Bao. 2018. Learning word meta-embeddings by autoencoding. In Inter- national Conference on Computational Linguistics, (COLING).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Multimodal distributional semantics", |
|
"authors": [ |
|
{ |
|
"first": "Elia", |
|
"middle": [], |
|
"last": "Bruni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nam-Khanh", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elia Bruni, Nam-Khanh Tran, and Marco Baroni. 2014. Multimodal distributional semantics. Journal of Ar- tificial Intelligence Research.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Crossdocument language modeling. arXiv e-prints", |
|
"authors": [ |
|
{ |
|
"first": "Avi", |
|
"middle": [], |
|
"last": "Caciularu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Matthew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arie", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Cattan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avi Caciularu, Arman Cohan, Iz Beltagy, Matthew E Peters, Arie Cattan, and Ido Dagan. 2021. Cross- document language modeling. arXiv e-prints, pages arXiv-2101.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Unsupervised multilingual word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Xilun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xilun Chen and Claire Cardie. 2018. Unsupervised multilingual word embeddings. In Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Frustratingly easy meta-embedding -computing metaembeddings by averaging source word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Coates", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danushka", |
|
"middle": [], |
|
"last": "Bollegala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "North American Chapter of the Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joshua Coates and Danushka Bollegala. 2018. Frustrat- ingly easy meta-embedding -computing metaem- beddings by averaging source word embeddings. In North American Chapter of the Association for Com- putational Linguistics (NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Word translation without parallel data", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Denoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herv\u00e9", |
|
"middle": [], |
|
"last": "J\u00e9gou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1710.04087" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herv\u00e9 J\u00e9gou. 2017. Word translation without parallel data. arXiv preprint arXiv:1710.04087.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Placing search in context: The concept revisited", |
|
"authors": [ |
|
{ |
|
"first": "Lev", |
|
"middle": [], |
|
"last": "Finkelstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeniy", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yossi", |
|
"middle": [], |
|
"last": "Matias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Rivlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zach", |
|
"middle": [], |
|
"last": "Solan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gadi", |
|
"middle": [], |
|
"last": "Wolfman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eytan", |
|
"middle": [], |
|
"last": "Ruppin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM Transactions on information systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev Finkelstein, Evgeniy Gabrilovich, Yossi Matias, Ehud Rivlin, Zach Solan, Gadi Wolfman, and Ey- tan Ruppin. 2002. Placing search in context: The concept revisited. ACM Transactions on informa- tion systems.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Generalized procrustes analysis", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gower", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1975, |
|
"venue": "Psychometrika", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John C Gower. 1975. Generalized procrustes analysis. Psychometrika.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Diachronic word embeddings reveal statistical laws of semantic change", |
|
"authors": [ |
|
{ |
|
"first": "Jure", |
|
"middle": [], |
|
"last": "William L Hamilton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Leskovec", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1605.09096" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William L Hamilton, Jure Leskovec, and Dan Juraf- sky. 2016. Diachronic word embeddings reveal sta- tistical laws of semantic change. arXiv preprint arXiv:1605.09096.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "SemEval-2010 task 8: Multi-way classification of semantic relations between pairs of nominals", |
|
"authors": [ |
|
{ |
|
"first": "Iris", |
|
"middle": [], |
|
"last": "Hendrickx", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Su", |
|
"middle": [ |
|
"Nam" |
|
], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zornitsa", |
|
"middle": [], |
|
"last": "Kozareva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diarmuid\u00f3", |
|
"middle": [], |
|
"last": "S\u00e9aghdha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Pennacchiotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenza", |
|
"middle": [], |
|
"last": "Romano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stan", |
|
"middle": [], |
|
"last": "Szpakowicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 5th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iris Hendrickx, Su Nam Kim, Zornitsa Kozareva, Preslav Nakov, Diarmuid\u00d3 S\u00e9aghdha, Sebastian Pad\u00f3, Marco Pennacchiotti, Lorenza Romano, and Stan Szpakowicz. 2010. SemEval-2010 task 8: Multi-way classification of semantic relations be- tween pairs of nominals. In Proceedings of the 5th International Workshop on Semantic Evalua- tion, pages 33-38, Uppsala, Sweden. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Simlex-999: Evaluating semantic models with (genuine) similarity estimation", |
|
"authors": [ |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roi", |
|
"middle": [], |
|
"last": "Reichart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felix Hill, Roi Reichart, and Anna Korhonen. 2015. Simlex-999: Evaluating semantic models with (gen- uine) similarity estimation. Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Learning multilingual word embeddings in latent metric space: a geometric approach", |
|
"authors": [ |
|
{ |
|
"first": "Pratik", |
|
"middle": [], |
|
"last": "Jawanpuria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arjun", |
|
"middle": [], |
|
"last": "Balgovind", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Kunchukuttan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bamdev", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pratik Jawanpuria, Arjun Balgovind, Anoop Kunchukuttan, and Bamdev Mishra. 2019. Learn- ing multilingual word embeddings in latent metric space: a geometric approach. Transactions of the Association for Computational Linguistics (TACL).", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Semeval-2012 task 2: Measuring degrees of relational similarity", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Jurgens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Turney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saif", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Holyoak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the First Joint Conference on Lexical and Computational Semantics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David A Jurgens, Peter D Turney, Saif M Mohammad, and Keith J Holyoak. 2012. Semeval-2012 task 2: Measuring degrees of relational similarity. In Pro- ceedings of the First Joint Conference on Lexical and Computational Semantics-Volume 1: Proceed- ings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Generalizing procrustes analysis for better bilingual dictionary induction", |
|
"authors": [ |
|
{ |
|
"first": "Yova", |
|
"middle": [], |
|
"last": "Kementchedjhieva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "The SIGNLL Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yova Kementchedjhieva, Sebastian Ruder, Ryan Cot- terell, and Anders S\u00f8gaard. 2018. Generalizing pro- crustes analysis for better bilingual dictionary induc- tion. In The SIGNLL Conference on Computational Natural Language Learning (CoNLL).", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Dynamic meta-embeddings for improved sentence representations", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changhan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of thr Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Changhan Wang, and Kyunghyun Cho. 2018. Dynamic meta-embeddings for improved sen- tence representations. In Proceedings of thr Con- ference on Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Improving distributional similarity with lessons learned from word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics (TACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy, Yoav Goldberg, and Ido Dagan. 2015. Im- proving distributional similarity with lessons learned from word embeddings. Transactions of the Associ- ation for Computational Linguistics (TACL).", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Investigating different syntactic context types and context representations for learning word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Bofang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Buzhou", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aleksandr", |
|
"middle": [], |
|
"last": "Drozd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rogers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyong", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2421--2431", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1257" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bofang Li, Tao Liu, Zhe Zhao, Buzhou Tang, Alek- sandr Drozd, Anna Rogers, and Xiaoyong Du. 2017. Investigating different syntactic context types and context representations for learning word embed- dings. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2421-2431, Copenhagen, Denmark. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Better word representations with recursive neural networks for morphology", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Richard Socher, and Christopher Man- ning. 2013. Better word representations with recur- sive neural networks for morphology. In Proceed- ings of Conference on Computational Natural Lan- guage Learning.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Learning word vectors for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Maas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Daly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analy- sis. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Hu- man Language Technologies, pages 142-150, Port- land, Oregon, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Paraphrasing vs coreferring: Two sides of the same coin", |
|
"authors": [ |
|
{ |
|
"first": "Yehudit", |
|
"middle": [], |
|
"last": "Meged", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avi", |
|
"middle": [], |
|
"last": "Caciularu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vered", |
|
"middle": [], |
|
"last": "Shwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4897--4907", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.440" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yehudit Meged, Avi Caciularu, Vered Shwartz, and Ido Dagan. 2020. Paraphrasing vs coreferring: Two sides of the same coin. In Findings of the Associ- ation for Computational Linguistics: EMNLP 2020, pages 4897-4907, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013a. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in Neural Information Processing Systems (NIPS).", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013b. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in Neural Information Processing Systems (NIPS).", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Linguistic regularities in continuous space word representations", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yih", |
|
"middle": [], |
|
"last": "Wen-Tau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Wen-tau Yih, and Geoffrey Zweig. 2013c. Linguistic regularities in continuous space word representations. In Proceedings of the Confer- ence of the North American Chapter of the Associa- tion for Computational Linguistics (NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Linear ensembles of word embedding models", |
|
"authors": [ |
|
{ |
|
"first": "Avo", |
|
"middle": [], |
|
"last": "Murom\u00e4gi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kairit", |
|
"middle": [], |
|
"last": "Sirts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sven", |
|
"middle": [], |
|
"last": "Laur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 21st Nordic Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "96--104", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avo Murom\u00e4gi, Kairit Sirts, and Sven Laur. 2017. Lin- ear ensembles of word embedding models. In Pro- ceedings of the 21st Nordic Conference on Computa- tional Linguistics, pages 96-104, Gothenburg, Swe- den. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "A sentimental education: Sentiment analysis using subjectivity summarization based on minimum cuts", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL-04)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "271--278", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1218955.1218990" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Pang and Lillian Lee. 2004. A sentimental edu- cation: Sentiment analysis using subjectivity sum- marization based on minimum cuts. In Proceed- ings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL-04), pages 271- 278, Barcelona, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL'05)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "115--124", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1219840.1219855" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Pang and Lillian Lee. 2005. Seeing stars: Ex- ploiting class relationships for sentiment categoriza- tion with respect to rating scales. In Proceed- ings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL'05), pages 115- 124, Ann Arbor, Michigan. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "GloVe: global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. GloVe: global vectors for word rep- resentation. In Empirical Methods in Natural Lan- guage Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1202" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "What's in your embedding, and how it predicts task performance", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rogers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashwath", |
|
"middle": [ |
|
"Hosur" |
|
], |
|
"last": "Ananthakrishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rumshisky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2690--2703", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Rogers, Shashwath Hosur Ananthakrishna, and Anna Rumshisky. 2018. What's in your embedding, and how it predicts task performance. In Proceed- ings of the 27th International Conference on Com- putational Linguistics, pages 2690-2703, Santa Fe, New Mexico, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "A discriminative latent-variable model for bilingual lexicon induction", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yova", |
|
"middle": [], |
|
"last": "Kementchedjhieva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.09334" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Ruder, Ryan Cotterell, Yova Kementched- jhieva, and Anders S\u00f8gaard. 2018. A discriminative latent-variable model for bilingual lexicon induction. arXiv preprint arXiv:1808.09334.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Tjong Kim Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the Conference on Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of the Conference on Natural Language Learning (CoNLL).", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "A generalized solution of the orthogonal procrustes problem", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Sch\u00f6nemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1966, |
|
"venue": "Psychometrika", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Sch\u00f6nemann. 1966. A generalized solution of the orthogonal procrustes problem. Psychometrika.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "A multi-pairwise extension of procrustes analysis for multilingual word translation", |
|
"authors": [ |
|
{ |
|
"first": "Hagai", |
|
"middle": [], |
|
"last": "Taitelbaum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gal", |
|
"middle": [], |
|
"last": "Chechik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Goldberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hagai Taitelbaum, Gal Chechik, and Jacob Goldberger. 2019. A multi-pairwise extension of procrustes anal- ysis for multilingual word translation. In Confer- ence on Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Normalized word embedding and orthogonal transform for bilingual word translation", |
|
"authors": [ |
|
{ |
|
"first": "Chao", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiye", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chao Xing, Dong Wang, Chao Liu, and Yiye Lin. 2015. Normalized word embedding and orthogonal trans- form for bilingual word translation. In Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL).", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Learning word meta-embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Wenpeng", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenpeng Yin and Hinrich Sch\u00fctze. 2016. Learning word meta-embeddings. In Annual Meeting of the Association for Computational Linguistics (ACL).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Average MSE for word embeddings vs their corpus occurrence count (binned with resolution of 50).", |
|
"uris": null |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Results for lexical-semantic benchmarks. Best performance is bolded." |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Results for downstream task. Best performance is bolded." |
|
} |
|
} |
|
} |
|
} |