|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:12:11.953537Z" |
|
}, |
|
"title": "A Simple and Effective Usage of Word Clusters for CBOW Model", |
|
"authors": [ |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tokyo Institute of Technology", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chenlong", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tokyo Institute of Technology", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hidetaka", |
|
"middle": [], |
|
"last": "Kamigaito", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tokyo Institute of Technology", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hiroya", |
|
"middle": [], |
|
"last": "Takamura", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tokyo Institute of Technology", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Manabu", |
|
"middle": [], |
|
"last": "Okumura", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tokyo Institute of Technology", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We propose a simple and effective method for incorporating word clusters into the Continuous Bag-of-Words (CBOW) model. Specifically, we propose to replace infrequent input and output words in CBOW model with their clusters. The resulting cluster-incorporated CBOW model produces embeddings of frequent words and a small amount of cluster embeddings, which will be fine-tuned in downstream tasks. We empirically show our replacing method works well on several downstream tasks. Through our analysis, we show that our method might be also useful for other similar models which produce word embeddings.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We propose a simple and effective method for incorporating word clusters into the Continuous Bag-of-Words (CBOW) model. Specifically, we propose to replace infrequent input and output words in CBOW model with their clusters. The resulting cluster-incorporated CBOW model produces embeddings of frequent words and a small amount of cluster embeddings, which will be fine-tuned in downstream tasks. We empirically show our replacing method works well on several downstream tasks. Through our analysis, we show that our method might be also useful for other similar models which produce word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Word embeddings have been widely applied to various natural language processing (NLP) tasks. These embeddings can be pretrained on a large corpus and carry useful semantic information. One of the most well-known methods for obtaining word embeddings is based on Continuous Bag-of-Words (CBOW) (Mikolov et al., 2013a) and there have been many research efforts to extend it.", |
|
"cite_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 316, |
|
"text": "(Mikolov et al., 2013a)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we focus on incorporating word clusters into CBOW model. Each word cluster consists of words that function similarly. By aggregating such words, we can alleviate data sparsity, even though each of those words is infrequent. In the past few years, word clusters have been applied to various tasks, such as named-entity recognition (Ritter et al., 2011) , machine translation (Wuebker et al., 2013) and parsing (Kong et al., 2014) . Many word clustering algorithms can be applied to a raw corpus with different languages and help us obtain word clusters easily without additional language resources.", |
|
"cite_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 366, |
|
"text": "(Ritter et al., 2011)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 411, |
|
"text": "(Wuebker et al., 2013)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 443, |
|
"text": "(Kong et al., 2014)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In our method, we keep only very frequent words and replace the other words with their clusters for both input and output words in the CBOW model. This is motivated by the fact that word clusters are more reliable than infrequent words. Thus, only very frequent word embeddings and a small amount of cluster embeddings are produced as the output. When fine-tuning the trained embeddings on downstream tasks, the embeddings of infrequent words within one cluster are initialized by the embedding of their cluster to increase the coverage of pretrained word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Since word embeddings are usually trained on the large-scale dataset. For making clusters on the large-scale dataset, we choose bidirectional, interpolated, refining, and alternating (BIRA) predictive exchange algorithm (Dehdari et al., 2016) 1 as our clustering method. Because BIRA was reported to be faster than many other methods. Notably, it can produce 800 clusters on 1 billion English tokens in 1.4 hours.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 242, |
|
"text": "(Dehdari et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We evaluate our cluster-incorporated word embeddings 2 on downstream tasks, in which finetuning of word embeddings is involved. The evaluation for frequent words, for which our method also works well, on word similarity tasks can be found in appendix A. For the downstream tasks, we choose language modeling (LM) tasks, which are a fundamental task in NLP, as well as two machine translation (MT) tasks. To verify the effect of word clusters across different languages, 8 typologically diverse languages are further selected for the LM task. Finally, an analysis is provided for our method. In summary, our replacing method can be used to improve the embeddings of frequent and infrequent words, to reduce the number of word embeddings and to make training more effective.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A number of related research efforts have been done to help to learn better word embeddings aiming at different aspects. For example, Neelakantan et al. (2014) proposed an extension that learns multiple embeddings per word type. Ammar et al. (2016) proposed methods for estimating embeddings for different languages in a single shared embedding space. There is also a lot of work that incorporates internal information of words, such as character-level information (Chen et al., 2015; Bojanowski et al., 2017) and morpheme information (Luong et al., 2013; Qiu et al., 2014) . Our research aims at another aspect and focuses on incorporating word clusters into the CBOW model, which has not been studied before.", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 159, |
|
"text": "Neelakantan et al. (2014)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 248, |
|
"text": "Ammar et al. (2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 465, |
|
"end": 484, |
|
"text": "(Chen et al., 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 485, |
|
"end": 509, |
|
"text": "Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 535, |
|
"end": 555, |
|
"text": "(Luong et al., 2013;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 573, |
|
"text": "Qiu et al., 2014)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "There have also been some previous researches that utilized word clusters for reducing the number of word embeddings. Botha et al. (2017) used word clusters to reduce the network size for the part-of-speech tagging task. Shu and Nakayama (2018) attempted to compress word embeddings without losing performance by constructing the embeddings with a few basic vectors. Our goal is different from the previous work in that we attempt to learn better word embeddings and do not aim at reducing the parameters when our embeddings are fine-tuned in downstream tasks. Nonetheless, the reduction of the number of word embeddings from the CBOW model before fine-tuning is still one of our goals as we can save space to store these embeddings and save time to download them. For example, Google News Vectors have around 3 million words, and we need only 2% of the number of the word embeddings if we choose 100K most frequent words and 10K word clusters in our method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 137, |
|
"text": "Botha et al. (2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 244, |
|
"text": "Shu and Nakayama (2018)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Let w t denote the t-th word in a given text. We adopt the basic CBOW model architecture for learning word embeddings. The CBOW model predicts the output word w t given the input words in the window which precede or follow the output word. When the window size is 2, as an example, the input words are w t\u22122 , w t\u22121 , w t+1 , w t+2 . We denote the input and output embeddings of word w i respectively as x i and o i . The CBOW model computes the hidden representation as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CBOW Model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h = 1 2c c i=\u2212c,i =0 x t+i ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "CBOW Model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where c is the window size. We use negative sampling (Mikolov et al., 2013b) to train the CBOW model by maximizing the following objective function:", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 76, |
|
"text": "(Mikolov et al., 2013b)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CBOW Model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "log\u03c3( h T o t ) + k j=1 log\u03c3(\u2212 h T o j ),", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "CBOW Model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where k is the size of the negative sample, o j is the j-th noise word embedding and \u03c3 is the sigmoid function. Each word in the negative sample is drawn from the unigram distribution. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CBOW Model", |
|
"sec_num": "3.1" |
|
}, |
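
{

"text": "As a minimal illustration of Eqs. (1) and (2), the objective for one target word can be sketched in NumPy as follows; the array names X (input embeddings), O (output embeddings) and the helper sigmoid are illustrative only, and the negative word indices are assumed to be pre-sampled from the unigram distribution:\n\nimport numpy as np\n\ndef sigmoid(z):\n    return 1.0 / (1.0 + np.exp(-z))\n\ndef cbow_negative_sampling_objective(X, O, context_ids, target_id, negative_ids):\n    # Eq. (1): h is the average of the input embeddings of the 2c context words.\n    h = X[context_ids].mean(axis=0)\n    # Eq. (2): log-likelihood of the target word plus k sampled noise words.\n    objective = np.log(sigmoid(h @ O[target_id]))\n    objective += np.sum(np.log(sigmoid(-(O[negative_ids] @ h))))\n    return objective  # maximized during training",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "CBOW Model",

"sec_num": "3.1"

},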
|
{ |
|
"text": "As a method for incorporating word clusters, we propose to replace infrequent words with their clusters for the input and output. The architecture is shown in Figure 1 . This is motivated by the intuition that the embeddings of clusters should be more reliable than those of infrequent words. We denote the embedding of the cluster for word w t+i as d t+i . We present the following two replacing methods:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 167, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Replacing Methods", |
|
"sec_num": "3.2" |
|
}, |
|
{

"text": "\u2022 ReIn: In the input, x_{t+i} in Eq. (1) will be replaced with d_{t+i} if the frequency of w_{t+i} is less than the threshold f_{in}.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Replacing Methods",

"sec_num": "3.2"

},
|
{ |
|
"text": "\u2022 ReOut: In the output, output words whose frequency is less than f out are replaced with their clusters. Thus, in negative sampling, a noise word will be sampled from clusters and frequent words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Replacing Methods", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "As with the standard CBOW model, we use the input word embeddings and input cluster embeddings for downstream tasks. Thresholds f in and f out are set to 100 in all experiments. Due to this large value, each cluster contains many infrequent words, which share the same embedding. We use two methods together, which is referred to as ReIn+ReOut in the following experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Replacing Methods", |
|
"sec_num": "3.2" |
|
}, |
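
{

"text": "As a minimal sketch (the function and cluster-token names below are illustrative and not those of our released code), the replacement in ReIn and ReOut can be viewed as a preprocessing step that maps every word whose corpus frequency is below the threshold to a cluster token before the CBOW model is trained:\n\nfrom collections import Counter\n\ndef replace_infrequent(tokens, cluster_of, threshold=100, prefix='__cluster_'):\n    # Words with frequency below the threshold are replaced by their cluster\n    # token, so both input and output positions use clusters instead of\n    # infrequent words (ReIn and ReOut).\n    freq = Counter(tokens)\n    return [t if freq[t] >= threshold else prefix + str(cluster_of[t])\n            for t in tokens]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Replacing Methods",

"sec_num": "3.2"

},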
|
{ |
|
"text": "Since the embeddings of clusters are learned by aggregating many infrequent words, they are more robust than the embeddings of the infrequent words. During the fine-tuning process for a downstream task, the embeddings of infrequent words are first initialized with the embeddings of their clusters. As most of these infrequent words appear only a few times, these embeddings will not be updated far away from each other within one cluster. The visualization of these embeddings before and after fine-tuning can be found in the appendix B. As a result, these embeddings for infrequent words become more reliable since originally most infrequent word embeddings are updated only several times and are not far away from where they were randomly initialized. Since the context of frequent words becomes less noisy by replacing all the infrequent words with their clusters, the learned frequent word embeddings are also better, as shown later in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation of ReIn and ReOut", |
|
"sec_num": "3.2.1" |
|
}, |
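
{

"text": "The initialization step before fine-tuning can be sketched as follows; this is an illustrative helper rather than our released code, and it assumes that pretrained maps frequent words and cluster tokens to their vectors:\n\nimport numpy as np\n\ndef init_lookup_table(vocab, pretrained, cluster_of, dim, prefix='__cluster_'):\n    # Frequent words keep their own pretrained vectors; infrequent words are\n    # initialized with the vector of their cluster token; anything missing\n    # falls back to the usual random initialization.\n    rng = np.random.default_rng(0)\n    table = rng.uniform(-0.1, 0.1, (len(vocab), dim)).astype(np.float32)\n    for idx, word in enumerate(vocab):\n        key = word if word in pretrained else prefix + str(cluster_of.get(word, -1))\n        if key in pretrained:\n            table[idx] = pretrained[key]\n    return table",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Motivation of ReIn and ReOut",

"sec_num": "3.2.1"

},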
|
{ |
|
"text": "The standard CBOW model is usually trained with negative sampling, which is designed for speeding up the training process. By using ReOut, infrequent noise words will be replaced with their clusters, which contain more noise words than the original CBOW model. As a result, ReOut makes the training of the CBOW model more effective, as shown later in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation of ReIn and ReOut", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "We applied our embeddings to downstream tasks: language modeling (LM) and low-resource machine translation (MT). When applying to the downstream tasks, we only used the training data of the specific task to obtain word clusters and embeddings without any extra data. We then used the learned embeddings to initialize the lookup table of word embeddings for the task. In this paper, we limit the applications of our model to relatively small datasets to demonstrate the usefulness of our method. We plan to conduct larger-scale experiments on more downstream tasks in future work. In the following tables, CBOW and ReIn+ReOut indicate that they are initialization methods for specific downstream tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments on LM and MT", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this section, we describe the hyper-parameters for producing word clusters and word embeddings. As we mentioned before, we obtained word clusters through the ClusterCat software. For most hyperparameters, we used its default values. We set the number of clusters to 600 in all our experiments. Since our work involves many tasks in total, it is hard to choose the optimal number of word clusters for each task. We experimented with several values (600, 800 and 1000) and observed the same trend. Thus, we simply chose 600, for convenience, for all tasks. For producing word embeddings, our implementation was based on the fasttext 3 . Our cluster-incorporated CBOW model and the standard CBOW model were trained under the same hyper-parameters. We set most hyper-parameters as its default values. Namely, we set the training epoch to 5, the number of negative examples to 5, the window size to 5, and the minimum count of word occurrence to 5 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hyper-parameter Settings", |
|
"sec_num": "4.1" |
|
}, |
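
{

"text": "For reference, a baseline CBOW model with these hyper-parameters can be trained with the standard fastText Python bindings as sketched below; our cluster-incorporated model instead modifies the fastText source and is trained on the corpus after the infrequent-word replacement of Sec. 3.2 has been applied, so the call here is illustrative only:\n\nimport fasttext\n\n# Standard CBOW baseline with the settings used in our experiments;\n# the embedding dimension is set to that of the downstream task.\nmodel = fasttext.train_unsupervised(\n    'train.txt', model='cbow',\n    dim=200, epoch=5, neg=5, ws=5, minCount=5)\nvector = model.get_word_vector('the')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hyper-parameter Settings",

"sec_num": "4.1"

},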
|
{ |
|
"text": "We test ReIn+ReOut based on the recent state-ofthe-art awd-lstm-lm codebase 5 (Merity et al., 2018) using two standard language modeling datasets: Penn Treebank (PTB) and WikiText-2 (Wiki2). We followed exactly the same setting in the source code. The results are shown in Table 1 , and we found that our ReIn+ReOut is effective even with the strong baseline.", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 99, |
|
"text": "(Merity et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 273, |
|
"end": 280, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "LM on Standard English Datasets", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Wiki2 AWD-LSTM w/o fine-tuning (Merity et al., 2018) 58 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 52, |
|
"text": "(Merity et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PTB", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We applied our method to the standard long-short term memory networks (LSTMs) based sequenceto-sequence (seq2seq) model on two datasets: German-English (de-en) with 153K sentence pairs 83 from IWSLT 2014 (Cettolo et al., 2014) , English-Vietnamese (en-vi) with 133K sentence pairs from IWSLT 2015 (Cettolo et al., 2012) . The detailed data statistics of two low-resource NMT datasets is in Table 2 . We used the opennmt-py toolkit 6 with a 2-layer bidirectional LSTM with hidden size of 500 and set the training epoch to 30. The word embedding size is set to 500 and the batch size is 64. We trained the seq2seq models by the SGD optimizer with start learning rate being 1.0, which will be decayed by 0.5 if perplexity does not decrease on the validation set. Other hyper-parameters were kept default. We also include some published results based on LSTM-based seq2seq models to gauge the result of our baseline. As shown in Table 3 , without any extra language pair resources, the ReIn+ReOut initialization improves the BLEU score over the baseline by 1.29 and 0.51 points on de-en, en-vi respectively. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 226, |
|
"text": "(Cettolo et al., 2014)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 297, |
|
"end": 319, |
|
"text": "(Cettolo et al., 2012)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 390, |
|
"end": 397, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 925, |
|
"end": 932, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Low-resource NMT", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "To verify the effect of word clusters on different languages, we selected 8 datasets containing typologically diverse languages from LM datasets released by Gerz et al. (2018) . The data statistics of 8 LM datasets is in Table 5 . We basically used standard LSTMs instead of AWD-LSTM-LM to save time. We chose the available standard LSTM-LM code 7 . Hyper-parameters of our standard LSTM model on language modeling tasks is in Table 4 . The results are shown in Table 6 . Our LSTM-LM obtained better results than the one from Gerz et al. 2018 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 175, |
|
"text": "Gerz et al. (2018)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 228, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 427, |
|
"end": 434, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 462, |
|
"end": 469, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "LM in Diverse Languages", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "In this section, we analyse ReIn+ReOut on the basis of LM experiments with en and de datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To show the gain for frequent and infrequent words, we measured the perplexity for frequent and infrequent words in the test data separately. Specifically, we calculated the perplexity of the next word, when an infrequent word is given as the current word. A similar analysis on language models can be found in Vania and Lopez (2017) . Our analysis do not contain new words in the test dataset. The results are shown in Table 7 . As we see, ReIn+ReOut is more effective than CBOW in learning both the embeddings of frequent and infrequent words, as we explained in Sec. 3.2.1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 333, |
|
"text": "Vania and Lopez (2017)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 420, |
|
"end": 427, |
|
"text": "Table 7", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Targeted Perplexity Results", |
|
"sec_num": "5.1" |
|
}, |
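
{

"text": "A minimal sketch of this targeted evaluation, assuming per-position natural-log probabilities from the language model and the same frequency threshold as in training (the names below are illustrative):\n\nimport math\n\ndef targeted_perplexity(pairs, freq, threshold=100):\n    # pairs: (current_word, log_prob_of_next_word) for each test position;\n    # positions are bucketed by whether the current word is frequent.\n    buckets = {'frequent': [], 'infrequent': []}\n    for current_word, logp in pairs:\n        key = 'frequent' if freq.get(current_word, 0) >= threshold else 'infrequent'\n        buckets[key].append(logp)\n    return {k: math.exp(-sum(v) / len(v)) for k, v in buckets.items() if v}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Targeted Perplexity Results",

"sec_num": "5.1"

},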
|
{ |
|
"text": "The results of ablation study are in Table 8 . Comparing the methods ReIn and CBOW, we found replacing only input infrequent words in CBOW also works better than the original CBOW. We can also conclude that replacing only output infrequent words in CBOW works better than the original CBOW, by comparing ReOut and CBOW. Both ReIn and ReOut work well even when they are used alone. As mentioned in the motivation of Re-Out, it makes the training more effective. To verify this, we increased the number of negative samples for ReIn and CBOW. The training will be more effective if we increase the number of negative samples, while training the model will also take longer time. As we increased the size of negative samples, we obtained better results for both ReIn and CBOW. We increased it only to 30 because we did not observe improvements when we made it further larger. This result indicates that we can use word clusters to obtain better results with a small amount of negative samples. In reality, we can also use off-the-shelf word clusters to avoid spending time for producing word clusters. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 44, |
|
"text": "Table 8", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "To gauge the improvements, we used off-the-shelf pretrained word vectors in English: GloVe vectors (Pennington et al., 2014) and Google News Vectors 8 . We obtained 258, 290 and 289 perplexity scores on en with Google News Vectors, Glove vectors and ReIn+ReOut respectively. Although ReIn+ReOut underperforms Google News Vectors, which were trained on 100 billion tokens, it obtained the results comparable to Glove Vectors, trained on 6 billion tokens. This indicates that our ReIn+ReOut is effective even without extra training data (only 783K training tokens in en). Table 9 : Perplexity results of standard LSTM compared with off-the-shelf vectors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 124, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 570, |
|
"end": 577, |
|
"text": "Table 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "LM Results on Off-the-shelf Vectors", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We proposed a simple and effective method to incorporate word clusters into the CBOW model. Our method is effective on several downstream tasks. For future work, we will test our methods on larger corpora and also add more downstream tasks. We will also study how to combine word clusters and subword information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The word similarity task is not necessarily suitable for our replacing method due to many infrequent words sharing the same embedding within one cluster. Thus, we report the results of the task for three different groups of test word pairs: frequent-wordpair consisting of frequent word pairs, infrequentword-pair consisting of word pairs that share a cluster embedding with other words, and all-word-pair consisting of all test word pairs. We used the publicly available enwik8 9 corpus as the training data to obtain both word embeddings and word clusters. Note that we use this data only for the word similarity task, not for downstream tasks such has language modelling and machine translation. We preprocessed the corpus by lowercasing all words, removing words that contain non-alphabetical characters, and removing words whose frequency is less than 5. The final corpus contains approximately 12 million tokens and 60K word types. We chose MEN, MTurk287, MTurk771, RW and WS353 as our datasets. Then, we evaluated the quality of these representations by computing Spearman's rank correlation coefficient. One straightforward method to incorporate word cluster into CBOW model is to average the embeddings of word and its cluster referred as to AvgIn. We first applied ClusterCat to the preprocessed corpus to obtain word clusters and then produced cluster-incorporated word embeddings with ReIn+ReOut. The results are shown in Table 10 . In ReIn+ReOut, the number of input words is 10,203, which is the sum of 9,603 frequent words and 600 clusters. This is only 16.9% of the number of in-9 http://mattmahoney.net/dc/enwik8.zip Figure 2 : Visualization of the embeddings of frequent words and clusters before fine-tuning (left) and the embeddings of frequent and infrequent words after finetuning (right). The red circle represents frequent words. The color of infrequent words within different clusters are different (right), and the big circle represents word clusters (left).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1434, |
|
"end": 1442, |
|
"text": "Table 10", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1634, |
|
"end": 1642, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Experiments on Word Similarity Task", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "put words for the original CBOW, which maintains 60K input words. In all word pair group, CBOW outperformed ReIn+ReOut on all datasets. This is because ReIn+ReOut does not perform well in infrequent-word-pair group as many infrequent words share exactly the same embedding in one cluster. In this experiment, each cluster had 82 words on average. However, ReIn+ReOut outperformed CBOW on frequent-word-pair group in all datasets. This result suggests that ReIn+ReOut is effective in learning embeddings for frequent words with much fewer parameters. AvgIn underperformed CBOW in all-word-pair group, which suggests that this straightforward way to incorporate word clusters is not effective. We also found that AvgIn+ReOut can improve the performance on 3 datasets in all-word-pairs group compared with AvgIn. However, AvgIn+ReOut still underperformed CBOW on all datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Experiments on Word Similarity Task", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We visualize word embeddings using t-SNE projections. Specifically, we randomly chose 15 clusters and all frequent words from en and visualize frequent and infrequent word embeddings in these 15 clusters in Figure 2 . The embeddings of infrequent words within one cluster are located close together after being fine-tuned. Some infrequent word embeddings are updated only several times and are not far away from where they were randomly initialized, and now they become more reliable.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 215, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Visualization of Word Embeddings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used ClusterCat (https://github.com/jonsafari/clustercat) as the implementation.2 https://github.com/yukunfeng/cluster-cbow", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/facebookresearch/fastText 4 When we set the minimum count of word occurrence to 1, the standard CBOW does not perform well.5 https://github.com/salesforce/awd-lstm-lm", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/OpenNMT/OpenNMT-py 7 https://github.com/pytorch/examples/tree/master/ word language model", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank anonymous reviewers for their constructive comments and Hu also thanks his support from China Scholarship Council.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "An actor-critic algorithm for sequence prediction", |
|
"authors": [ |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Courville, and Yoshua Bengio. 2017. An actor-critic algorithm for sequence prediction. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Natural language processing with small feed-forward networks", |
|
"authors": [ |
|
{ |
|
"first": "Jan", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Botha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Bakalov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Salcianu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan A. Botha, Emily Pitler, Ji Ma, Anton Bakalov, Alex Salcianu, David Weiss, Ryan T. McDonald, and Slav Petrov. 2017. Natural language processing with small feed-forward networks. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Wit 3 : Web inventory of transcribed and translated talks", |
|
"authors": [ |
|
{ |
|
"first": "Mauro", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Girardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 16 th Conference of the European Association for Machine Translation (EAMT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "261--268", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mauro Cettolo, Christian Girardi, and Marcello Fed- erico. 2012. Wit 3 : Web inventory of transcribed and translated talks. In Proceedings of the 16 th Confer- ence of the European Association for Machine Trans- lation (EAMT), pages 261-268, Trento, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Report on the 11th iwslt evaluation campaign, iwslt", |
|
"authors": [ |
|
{ |
|
"first": "Mauro", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Niehues", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "St\u00fcker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the International Workshop on Spoken Language Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mauro Cettolo, Jan Niehues, Sebastian St\u00fcker, Luisa Bentivogli, and Marcello Federico. 2014. Report on the 11th iwslt evaluation campaign, iwslt 2014. In Proceedings of the International Workshop on Spo- ken Language Translation, Hanoi, Vietnam, page 57.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Joint learning of character and word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Xinxiong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huanbo", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Twenty-Fourth International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinxiong Chen, Lei Xu, Zhiyuan Liu, Maosong Sun, and Huanbo Luan. 2015. Joint learning of charac- ter and word embeddings. In Twenty-Fourth Inter- national Joint Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BIRA: Improved predictive exchange word clustering", |
|
"authors": [ |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "Dehdari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liling", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Van Genabith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1169--1174", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jon Dehdari, Liling Tan, and Josef van Genabith. 2016. BIRA: Improved predictive exchange word cluster- ing. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technolo- gies (NAACL), pages 1169-1174, San Diego, CA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Language modeling for morphologically rich languages: Character-aware modeling for word-level prediction", |
|
"authors": [ |
|
{ |
|
"first": "Daniela", |
|
"middle": [], |
|
"last": "Gerz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edoardo", |
|
"middle": [], |
|
"last": "Ponti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roi", |
|
"middle": [], |
|
"last": "Reichart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Transactions of the Association of Computational Linguistics", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "451--465", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniela Gerz, Ivan Vuli\u0107, Edoardo Ponti, Jason Narad- owsky, Roi Reichart, and Anna Korhonen. 2018. Language modeling for morphologically rich lan- guages: Character-aware modeling for word-level prediction. Transactions of the Association of Com- putational Linguistics, 6:451-465.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Towards neural phrasebased machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Po-Sen", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sitao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dengyong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Po-Sen Huang, Chong Wang, Sitao Huang, Dengyong Zhou, and Li Deng. 2018. Towards neural phrase- based machine translation. In International Confer- ence on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A dependency parser for tweets", |
|
"authors": [ |
|
{ |
|
"first": "Lingpeng", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Archna", |
|
"middle": [], |
|
"last": "Bhatia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1001--1012", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lingpeng Kong, Nathan Schneider, Swabha Swayamdipta, Archna Bhatia, Chris Dyer, and Noah A Smith. 2014. A dependency parser for tweets. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1001-1012.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Stanford neural machine translation systems for spoken language domain", |
|
"authors": [ |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Workshop on Spoken Language Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh-Thang Luong and Christopher D. Manning. 2015. Stanford neural machine translation systems for spo- ken language domain. In International Workshop on Spoken Language Translation.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Better word representations with recursive neural networks for morphology", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Seventeenth Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "104--113", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Richard Socher, and Christopher Man- ning. 2013. Better word representations with re- cursive neural networks for morphology. In Pro- ceedings of the Seventeenth Conference on Computa- tional Natural Language Learning, pages 104-113.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Regularizing and optimizing lstm language models", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Merity", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Shirish Keskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Merity, Nitish Shirish Keskar, and Richard Socher. 2018. Regularizing and optimizing lstm language models. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1301.3781" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013a. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013b. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in neural information processing systems, pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Efficient nonparametric estimation of multiple embeddings per word in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeevan", |
|
"middle": [], |
|
"last": "Shankar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arvind Neelakantan, Jeevan Shankar, Alexandre Pas- sos, and Andrew McCallum. 2014. Efficient non- parametric estimation of multiple embeddings per word in vector space. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word rep- resentation. In Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Co-learning of word representations and morpheme representations", |
|
"authors": [ |
|
{ |
|
"first": "Siyu", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qing", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiang", |
|
"middle": [], |
|
"last": "Bian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tie-Yan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of COL-ING 2014, the 25th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "141--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siyu Qiu, Qing Cui, Jiang Bian, Bin Gao, and Tie-Yan Liu. 2014. Co-learning of word representations and morpheme representations. In Proceedings of COL- ING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pages 141-150.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Named entity recognition in tweets: an experimental study", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1524--1534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Ritter, Sam Clark, Oren Etzioni, et al. 2011. Named entity recognition in tweets: an experimental study. In Proceedings of the conference on empiri- cal methods in natural language processing, pages 1524-1534. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Compressing word embeddings via deep compositional code learning", |
|
"authors": [ |
|
{ |
|
"first": "Raphael", |
|
"middle": [], |
|
"last": "Shu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideki", |
|
"middle": [], |
|
"last": "Nakayama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raphael Shu and Hideki Nakayama. 2018. Compress- ing word embeddings via deep compositional code learning. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "From characters to words to in between: Do we capture morphology?", |
|
"authors": [ |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Vania", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2016--2027", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1184" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Clara Vania and Adam Lopez. 2017. From characters to words to in between: Do we capture morphol- ogy? In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 2016-2027, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Improving statistical machine translation with word class models", |
|
"authors": [ |
|
{ |
|
"first": "Joern", |
|
"middle": [], |
|
"last": "Wuebker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Peitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Rietig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1377--1381", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joern Wuebker, Stephan Peitz, Felix Rietig, and Her- mann Ney. 2013. Improving statistical machine translation with word class models. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1377-1381.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "CBOW architecture with our replacing method for input and output words trained with negative sampling. Suppose that w t\u22122 , w t+1 , o 1 and o 3 are infrequent words." |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"text": "Perplexity results on PTB and Wiki2.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"text": "Data statistics of two low-resource NMT datasets.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td/><td>de-en</td><td>en-vi</td></tr><tr><td>seq2seq with attention (Luong and Manning, 2015)</td><td>-</td><td>23.3</td></tr><tr><td>AC+LL (Bahdanau et al., 2017)</td><td>28.53</td><td>-</td></tr><tr><td>NPMT (Huang et al., 2018)</td><td>29.92</td><td>27.69</td></tr><tr><td>Our seq2seq with attention</td><td>28.95</td><td>28.16</td></tr><tr><td>CBOW</td><td>29.25</td><td>28.24</td></tr><tr><td>Our ReIn+ReOut</td><td>30.24</td><td>28.67</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"text": "", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>: BLEU scores on two low-resource MT</td></tr><tr><td>datasets. NPMT in Huang et al. (2018) used a neural</td></tr><tr><td>phrase-based machine translation model and AC+LL in</td></tr><tr><td>Bahdanau et al. (2017) used a one-layer GRU encoder</td></tr><tr><td>and decoder with attention.</td></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"text": "on all datasets. As we see, ReIn+ReOut is effective for typologically diverse languages and also requires a smaller input vocabulary. For example, the input vocabulary of ReIn+ReOut for en dataset contains 1.3K words while the full vocabulary 50K.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>Embedding size</td><td>200</td></tr><tr><td>Epochs</td><td>40</td></tr><tr><td>LSTM layers</td><td>2</td></tr><tr><td>Optimizer</td><td>SGD</td></tr><tr><td>LSTM sequence length</td><td>35</td></tr><tr><td>Learning rate</td><td>20</td></tr><tr><td>LSTM hidden unit</td><td>200</td></tr><tr><td>Learning rate decay</td><td>4</td></tr><tr><td colspan=\"2\">Param. init: rand uniform [-0.1,0.1]</td></tr><tr><td>Gradient clipping</td><td>0.25</td></tr><tr><td>Dropout</td><td>0.2</td></tr><tr><td>Batch size</td><td>20</td></tr></table>" |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"text": "Hyper-parameters of our standard LSTM model on language modeling task.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF8": { |
|
"type_str": "table", |
|
"text": "Data statistics of 8 language modeling datasets and size of input vocabulary of our ReIn+ReOut.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"4\">Dataset Random CBOW ReIn+ReOut</td></tr><tr><td>zh</td><td>555</td><td>527</td><td>494</td></tr><tr><td>vi</td><td>153</td><td>145</td><td>138</td></tr><tr><td>de</td><td>609</td><td>542</td><td>484</td></tr><tr><td>en</td><td>365</td><td>317</td><td>289</td></tr><tr><td>ar</td><td>1647</td><td>1447</td><td>1305</td></tr><tr><td>he</td><td>1482</td><td>1236</td><td>1175</td></tr><tr><td>et</td><td>1451</td><td>1157</td><td>1004</td></tr><tr><td>tr</td><td>1379</td><td>1220</td><td>1148</td></tr></table>" |
|
}, |
|
"TABREF9": { |
|
"type_str": "table", |
|
"text": "Perplexity results of standard LSTM LM on 8 datasets with different initialization methods.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td/><td colspan=\"3\">Freq. Infreq. All</td></tr><tr><td>en</td><td>CBOW ReIn+ReOut 316 340</td><td>198 184</td><td>283 264</td></tr><tr><td>de</td><td>CBOW ReIn+ReOut 564 591</td><td>352 318</td><td>489 458</td></tr></table>" |
|
}, |
|
"TABREF10": { |
|
"type_str": "table", |
|
"text": "Targeted perplexity results of standard LSTM LM with different initializations.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF12": { |
|
"type_str": "table", |
|
"text": "Perplexity results of LSTM LM by changing the number of negative samples. '+neg' represents the number of negative samples, which is 5 at default.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF15": { |
|
"type_str": "table", |
|
"text": "Spearman's rank correlation coefficient on word similarity datasets for different groups. The best scores in each group are in bold.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |