|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:42:51.385538Z" |
|
}, |
|
"title": "NLPRL System for Very Low Resource Supervised Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Rupjyoti", |
|
"middle": [], |
|
"last": "Baruah", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology (BHU) Varanasi", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rajesh", |
|
"middle": [], |
|
"last": "Kumar Mundotiya", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology (BHU) Varanasi", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology (BHU) Varanasi", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kumar", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology (BHU) Varanasi", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes the results of the system that we used for the WMT20 very low resource (VLR) supervised MT shared task. For our experiments, we use a byte-level version of BPE, which requires a base vocabulary of size 256 only. BPE based models are a kind of sub-word models. Such models try to address the Out of Vocabulary (OOV) word problem by performing word segmentation so that segments correspond to morphological units. They are also reported to work across different languages, especially similar languages due to their sub-word nature. Based on BLEU cased score, our NLPRL systems ranked ninth for HSB to GER and tenth in GER to HSB translation scenario.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes the results of the system that we used for the WMT20 very low resource (VLR) supervised MT shared task. For our experiments, we use a byte-level version of BPE, which requires a base vocabulary of size 256 only. BPE based models are a kind of sub-word models. Such models try to address the Out of Vocabulary (OOV) word problem by performing word segmentation so that segments correspond to morphological units. They are also reported to work across different languages, especially similar languages due to their sub-word nature. Based on BLEU cased score, our NLPRL systems ranked ninth for HSB to GER and tenth in GER to HSB translation scenario.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "We report the results for our system that was used for our participation in the WMT20 shared task (Barrault et al., 2019) on very low resource Machine Translation (MT). The MT systems were built for the language pair Upper Sorbian (HSB) and German (GER) in both translation directions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 121, |
|
"text": "(Barrault et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The Sorbian languages are the West Slavic branch of the Indo-European languages, which have further categorized into two closely related languages, Upper Sorbian and Lower Sorbian. The categories of this language are recognized as a different and distinct language in the European Charter for Regional or Minority languages (Dolowy-Rybinska, 2011) . Upper Sorbian is a minority language of Germany that is spoken by 10, 000 to 15, 000 speakers (Elle, 2010), although this number is continually declining (Do\u0142owy-Rybi\u0144ska, 2018). To counter this, attempts are being made to increase the number of Sorbian speakers through bilingual educational scenarios and MT 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 324, |
|
"end": 347, |
|
"text": "(Dolowy-Rybinska, 2011)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Low resource MT was being attempted even before Neural Machine Translation (NMT) became the state-of-the-art. Several methods are used to improve the accuracy and quality of the lowresource SMT systems by using comparable corpora (Irvine and Callison-Burch, 2013; Babych et al., 2007) , pivot language (English or non-English) technique (Ahmadnia et al., 2017; Paul et al., 2013) , and using related resource-rich language (Nakov and Ng, 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 230, |
|
"end": 263, |
|
"text": "(Irvine and Callison-Burch, 2013;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 284, |
|
"text": "Babych et al., 2007)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 360, |
|
"text": "(Ahmadnia et al., 2017;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 379, |
|
"text": "Paul et al., 2013)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 443, |
|
"text": "(Nakov and Ng, 2012)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We use a byte-level version of Byte Pair Encoding based model with a Transformer for our experiments. The main motivation was to try out this model for the shared task and see how it works under a shared task setting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "NMT is an end-to-end learning system (Bahdanau et al., 2015) , based on the data-driven approach of machine translation, that requires a massive amount of parallel data for training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 60, |
|
"text": "(Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To overcome the lack of such data, several techniques have been tried out which are based on semi-supervised learning (Zheng et al. 2019 A model-agnostic meta-learning algorithm (Finn et al., 2017) for low-resource NMT exploits the multilingual high-resource language tasks (Gu et al., 2018b) . Gu et al. (2018a) achieved significant improvement in performance by utilizing a transfer-learning approach for extremely low resource languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 197, |
|
"text": "(Finn et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 292, |
|
"text": "(Gu et al., 2018b)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 312, |
|
"text": "Gu et al. (2018a)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Another proposed solution is to use word segmentation units, e.g. characters (Chung et al., 2016) , mixed word/characters (Luong and Man-ning, 2016) , or more intelligent sub-words (Sennrich et al., 2016) . It is claimed that an NMT model using such an approach is capable of openvocabulary translation by encoding rare and unknown words as sequences of sub-word units.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 97, |
|
"text": "(Chung et al., 2016)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 122, |
|
"end": 148, |
|
"text": "(Luong and Man-ning, 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 204, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The purpose of our experiments was to try out a supervised NMT system for the low resource language like HSB to GER and vice-versa for the WMT20 shared task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The standard Transformer architecture proposed by Vaswani et al. (2017) is used for this experiment. This architecture is able to handle long-term dependencies among input tokens, output tokens and between input-output by multi-head attention mechanism. Our method based on the model architecture of , which had used the Byte-level BPE (BBPE).", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 71, |
|
"text": "Vaswani et al. (2017)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The BBPE encoding is deployed on the Byte Pair Encoding (BPE) (Sennrich et al., 2016) , which is a subword algorithm to find a way to represent the given entire text dataset with a small number of tokens. BPE tries to find a balance between characterand word-level hybrid representations, enabling the encoding of any rare words in the vocabulary with appropriate subword tokens without introducing any \"unknown\" tokens. These segmented byte sequences are encoded into variable-length tokens, i.e., n-grams, which leads to the generation of the BPE vocabulary with byte n-grams. Before being fed to the Transformer model, the learned BBPE passes through bidirectional GRU, which enables to retain contextualization between byte representation of BPE.", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 85, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "3" |
|
}, |
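{

"text": "To make the byte-level BPE construction concrete, the following minimal sketch (our illustration in Python; the actual BBPE implementation of Wang et al. (2020) in fairseq differs in detail) learns merges over byte sequences, starting from the 256-value base vocabulary:\n\nfrom collections import Counter\n\ndef learn_bbpe(corpus, num_merges):\n    # Base vocabulary: the 256 byte values; each word starts as a tuple of bytes.\n    words = Counter(tuple(w.encode('utf-8')) for line in corpus for w in line.split())\n    merges = []\n    for _ in range(num_merges):\n        pairs = Counter()\n        for word, freq in words.items():\n            for a, b in zip(word, word[1:]):\n                pairs[(a, b)] += freq\n        if not pairs:\n            break\n        best = max(pairs, key=pairs.get)  # most frequent adjacent pair\n        merges.append(best)\n        new_words = Counter()\n        for word, freq in words.items():\n            out, i = [], 0\n            while i < len(word):\n                if i + 1 < len(word) and (word[i], word[i + 1]) == best:\n                    out.append((word[i], word[i + 1]))  # fuse the pair into one token\n                    i += 2\n                else:\n                    out.append(word[i])\n                    i += 1\n            new_words[tuple(out)] += freq\n        words = new_words\n    return merges",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "System Description",

"sec_num": null

},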
|
{ |
|
"text": "We use the Fairseq 2 (Ott et al., 2019) library to train the Transformer with the same learning rate as in the original paper.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 39, |
|
"text": "(Ott et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our models were trained on the data provided by the Workshop on Machine Translation (WMT) 2020. The statistics about the training, validation and test sets are 60000, 2000 and 2000, respectively for both directional pairs (HSB -GER).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Preprocessing", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We obtained 1727916 and 1710293 tokens of the GER and HSB, respectively, from the train set for preprocessing. The BPE vocabulary, Byte vocabulary and Character vocabulary are 16384, 2048 and 4096, respectively, for generating binary dataset by using fairseq-preprocess. The BBPE used as a subword BPE tokenizer, where preprocessing was performed using lowercasing only. This is beneficial from the low resource point of view, but it loses the case information for German, which could have affected the results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Preprocessing", |
|
"sec_num": "4.1" |
|
}, |
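{

"text": "For illustration, the binarization step described above could be invoked as follows (a sketch with assumed paths and standard fairseq-preprocess flags; the paper does not give the exact command):\n\nimport subprocess\n\nsubprocess.run([\n    'fairseq-preprocess',\n    '--source-lang', 'hsb', '--target-lang', 'de',\n    '--trainpref', 'data/train.bbpe',  # lowercased, BBPE-segmented splits (assumed names)\n    '--validpref', 'data/valid.bbpe',\n    '--testpref', 'data/test.bbpe',\n    '--destdir', 'data-bin/hsb-de',\n], check=True)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset and Preprocessing",

"sec_num": null

},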
|
{ |
|
"text": "We trained the Transformer model with Bi-GRU embedding, in which contextualization using the number of encoder and decoder layers are 2 with the dropout value 0.3. We trained our model with a batch size of 100, with the aid of Adam optimizer at 0.0005 learning rate. The learning rate has warmup update by 4000 to label smoothed cross-entropy loss function with label-smoothing value 0.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "4.2" |
|
}, |
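{

"text": "A sketch of the corresponding fairseq-train invocation (the flag names are our assumption of fairseq's standard CLI; the paper does not list the exact command, and the Bi-GRU byte embedding of the BBPE model is not part of the stock transformer architecture):\n\nimport subprocess\n\nsubprocess.run([\n    'fairseq-train', 'data-bin/hsb-de',\n    '--arch', 'transformer',\n    '--encoder-layers', '2', '--decoder-layers', '2',\n    '--dropout', '0.3',\n    '--optimizer', 'adam', '--lr', '0.0005',\n    '--lr-scheduler', 'inverse_sqrt', '--warmup-updates', '4000',\n    '--criterion', 'label_smoothed_cross_entropy', '--label-smoothing', '0.1',\n    '--batch-size', '100',\n], check=True)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training Details",

"sec_num": null

},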
|
{ |
|
"text": "The BBPE based Transformer model was evaluated on the blind test set at five different metrics provided by the task organizer, namely BLEU (Papineni et al., 2002) , BLEU-cased, TER (Snover et al., 2006) , BEER2.0 (Stanojevi\u0107 and Sima'an, 2014) , and CharacTER (Wang et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 162, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 202, |
|
"text": "(Snover et al., 2006)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 243, |
|
"text": "(Stanojevi\u0107 and Sima'an, 2014)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 279, |
|
"text": "(Wang et al., 2016)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
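{

"text": "For reference, the cased and uncased BLEU scores can be reproduced offline with sacrebleu (a sketch assuming sacrebleu's Python API and hypothetical file names; the official scores come from the organizers' evaluation):\n\nimport sacrebleu\n\nhyps = open('test.hyp', encoding='utf-8').read().splitlines()\nrefs = open('test.ref', encoding='utf-8').read().splitlines()\n\n# BLEU-cased is case-sensitive; plain BLEU lowercases both sides.\nprint('BLEU-cased:', sacrebleu.corpus_bleu(hyps, [refs]).score)\nprint('BLEU:', sacrebleu.corpus_bleu(hyps, [refs], lowercase=True).score)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results and Analysis",

"sec_num": null

},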
|
{ |
|
"text": "The obtained metrics score for each pair to each experiment is specified in Table 1 . The prediction of the test set was generated by performing the best validation checkpoint. However, while comparing the BLEU score of the valid set with the test set, we obtained a difference of +3.21 for HSB\u2192GER and +0.15 for GER\u2192HSB pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 83, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Before submitting the predictions of the test set, the BLEU scores of best and last checkpoints were almost equal, as shown in Table 2 . Moreover, the vocabulary size plays a crucial role in data-driven approaches of MTs as well. Hence, we have increased the vocabulary size from 2048 to 4096 for generating BBPE, which led to a small decrement in the BLEU score. One possible reason for such decrement is the small vocabulary size that generates generalized BBPE for low-resource language.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 134, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We have report the results for a Transformer-based MT system for the pair of HSB\u2194GER in very low resource settings. The introduced MT system works on Byte-level Byte Pair Encoding (BBPE), which yields 48.4 and 46.5 on HSB\u2192GER and GER\u2192HSB, respectively, as the BLEU score on the test set at the vocabulary size of 2048. When the vocabulary size was increased from 2048 to 4096, lower performance was obtained on the system on either side of the pair on the validation set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and future work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://minorityrights.org/ minorities/sorbs/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/pytorch/fairseq", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The support and the resources provided by PARAM Shivay Facility under the National Supercomputing Mission, Government of India at the Indian Institute of Technology, Varanasi are gratefully acknowledged.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Persian-Spanish Low-Resource Statistical Machine Translation Through English as Pivot Language", |
|
"authors": [ |
|
{

"first": "Benyamin",

"middle": [],

"last": "Ahmadnia",

"suffix": ""

},

{

"first": "Javier",

"middle": [],

"last": "Serrano",

"suffix": ""

},

{

"first": "Gholamreza",

"middle": [],

"last": "Haffari",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "24--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benyamin Ahmadnia, Javier Serrano, and Gholam- reza Haffari. 2017. Persian-Spanish Low-Resource Statistical Machine Translation Through English as Pivot Language. In Proceedings of the International Conference Recent Advances in Natural Language Processing, RANLP 2017, pages 24-30.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Neural Machine Translation, What Does Transfer Learning Transfer? In Proceedings of the 2020 Annual Conference of the Association for Computational Linguistics", |
|
"authors": [ |
|
{

"first": "Alham Fikri",

"middle": [],

"last": "Aji",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alham Fikri Aji. 2020. In Neural Machine Translation, What Does Transfer Learning Transfer? In Proceed- ings of the 2020 Annual Conference of the Associa- tion for Computational Linguistics. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Translating from under-resourced languages: Comparing direct transfer against pivot translation", |
|
"authors": [ |
|
{ |
|
"first": "Bogdan", |
|
"middle": [], |
|
"last": "Babych", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Hartley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serge", |
|
"middle": [], |
|
"last": "Sharoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the MT Summit XI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "412--418", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bogdan Babych, Anthony Hartley, and Serge Sharoff. 2007. Translating from under-resourced languages: Comparing direct transfer against pivot translation. Proceedings of the MT Summit XI, pages 412-418.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Neural Machine Translation by Jointly Learning to Align and Translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural Machine Translation by Jointly Learning to Align and Translate. In ICLR 2015.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Findings of the 2019 Conference on Machine Translation (WMT19", |
|
"authors": [ |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Costa-Juss\u00e0", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Federmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Fishel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yvette", |
|
"middle": [], |
|
"last": "Graham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Huck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "1--61", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lo\u00efc Barrault, Ond\u0159ej Bojar, Marta R Costa-Juss\u00e0, Christian Federmann, Mark Fishel, Yvette Gra- ham, Barry Haddow, Matthias Huck, Philipp Koehn, Shervin Malmasi, et al. 2019. Findings of the 2019 Conference on Machine Translation (WMT19). In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 1-61.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A Character-level Decoder without Explicit Segmentation for Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Junyoung", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1693--1703", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junyoung Chung, Kyunghyun Cho, and Yoshua Ben- gio. 2016. A Character-level Decoder without Ex- plicit Segmentation for Neural Machine Translation. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1693-1703.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A Survey of Multilingual Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Raj", |
|
"middle": [], |
|
"last": "Dabre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenhui", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Kunchukuttan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ACM Comput. Surv", |
|
"volume": "", |
|
"issue": "5", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3406095" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raj Dabre, Chenhui Chu, and Anoop Kunchukuttan. 2020. A Survey of Multilingual Neural Machine Translation. ACM Comput. Surv., 53(5).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A model minority", |
|
"authors": [ |
|
{ |
|
"first": "Nicole", |
|
"middle": [], |
|
"last": "Dolowy-Rybinska", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "sight Academia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicole Dolowy-Rybinska. 2011. A model minority. In- sight Academia.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Learning Upper Sorbian. The problems with minority language education for non-native pupils in the Upper Sorbian grammar school in Bautzen/Budy\u0161in", |
|
"authors": [ |
|
{ |
|
"first": "Nicole", |
|
"middle": [], |
|
"last": "Do\u0142owy-Rybi\u0144ska", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Journal of Bilingual Education and Bilingualism", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicole Do\u0142owy-Rybi\u0144ska. 2018. Learning Upper Sor- bian. The problems with minority language educa- tion for non-native pupils in the Upper Sorbian gram- mar school in Bautzen/Budy\u0161in. International Jour- nal of Bilingual Education and Bilingualism, pages 1-15.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Sorben-demographische und statistische Aspekte", |
|
"authors": [ |
|
{

"first": "Ludwig",

"middle": [],

"last": "Elle",

"suffix": ""

}
|
], |
|
"year": 2010, |
|
"venue": "Minderheiten als Mehrwert", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "309--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ludwig Elle. 2010. Sorben-demographische und statistische Aspekte. Vogt, Matthias Theodor, Neyer, J\u00fcrgen, Bingen, Dieter et Jan Sokol (\u00e9ds.), Minder- heiten als Mehrwert, Peter Lang GmbH, Frankfurt am Main, pages 309-318.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks", |
|
"authors": [ |
|
{ |
|
"first": "Chelsea", |
|
"middle": [], |
|
"last": "Finn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pieter", |
|
"middle": [], |
|
"last": "Abbeel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Levine", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 34th International Conference on Machine Learning", |
|
"volume": "70", |
|
"issue": "", |
|
"pages": "1126--1135", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chelsea Finn, Pieter Abbeel, and Sergey Levine. 2017. Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks. In Proceedings of the 34th Inter- national Conference on Machine Learning-Volume 70, pages 1126-1135.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Universal Neural Machine Translation for Extremely Low Resource Languages", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hany", |
|
"middle": [], |
|
"last": "Hassan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{

"first": "Victor",

"middle": [

"O",

"K"

],

"last": "Li",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "344--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Hany Hassan, Jacob Devlin, and Victor OK Li. 2018a. Universal Neural Machine Translation for Extremely Low Resource Languages. In Pro- ceedings of the 2018 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long Papers), pages 344-354.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Meta-Learning for Low-Resource Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{

"first": "Victor",

"middle": [

"O",

"K"

],

"last": "Li",

"suffix": ""

},

{

"first": "Kyunghyun",

"middle": [],

"last": "Cho",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3622--3631", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Yong Wang, Yun Chen, Victor OK Li, and Kyunghyun Cho. 2018b. Meta-Learning for Low- Resource Neural Machine Translation. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3622-3631.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Combining Bilingual and Comparable Corpora for Low Resource Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Irvine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the eighth workshop on statistical machine translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "262--270", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ann Irvine and Chris Callison-Burch. 2013. Combin- ing Bilingual and Comparable Corpora for Low Re- source Machine Translation. In Proceedings of the eighth workshop on statistical machine translation, pages 262-270.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Pivot-based Transfer Learning for Neural Machine Translation between Non-English Languages", |
|
"authors": [ |
|
{ |
|
"first": "Yunsu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Petre", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Petrushkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shahram", |
|
"middle": [], |
|
"last": "Khadivi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "865--875", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunsu Kim, Petre Petrov, Pavel Petrushkov, Shahram Khadivi, and Hermann Ney. 2019. Pivot-based Transfer Learning for Neural Machine Translation between Non-English Languages. In Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 865-875.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "MetaMT, a Meta Learning Method Leveraging Multiple Domain Data for Low Resource Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Rumeng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "8245--8252", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rumeng Li, Xun Wang, and Hong Yu. 2020. MetaMT, a Meta Learning Method Leveraging Multiple Do- main Data for Low Resource Machine Translation. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Inno- vative Applications of Artificial Intelligence Confer- ence, IAAI 2020, The Tenth AAAI Symposium on Ed- ucational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pages 8245-8252. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Achieving Open Vocabulary Neural Machine Translation with Hybrid Word-Character Models", |
|
"authors": [ |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1054--1063", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minh-Thang Luong and Christopher D Manning. 2016. Achieving Open Vocabulary Neural Machine Trans- lation with Hybrid Word-Character Models. In Pro- ceedings of the 54th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 1054-1063.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Improving Statistical Machine Translation for a Resource-Poor Language Using Related Resource-Rich Languages", |
|
"authors": [ |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "44", |
|
"issue": "", |
|
"pages": "179--222", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Preslav Nakov and Hwee Tou Ng. 2012. Improving Statistical Machine Translation for a Resource-Poor Language Using Related Resource-Rich Languages. Journal of Artificial Intelligence Research, 44:179- 222.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "fairseq: A Fast, Extensible Toolkit for Sequence Modeling", |
|
"authors": [ |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexei", |
|
"middle": [], |
|
"last": "Baevski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "48--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A Fast, Extensible Toolkit for Sequence Modeling. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics (Demonstrations), pages 48-53.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "BLEU: a Method for Automatic Evaluation of Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th annual meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: a Method for Automatic Evaluation of Machine Translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pages 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "How to Choose the Best Pivot Language for Automatic Translation of Low-Resource Languages", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Paul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Finch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eiichrio", |
|
"middle": [], |
|
"last": "Sumita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ACM Transactions on Asian Language Information Processing (TALIP)", |
|
"volume": "12", |
|
"issue": "4", |
|
"pages": "1--17", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Paul, Andrew Finch, and Eiichrio Sumita. 2013. How to Choose the Best Pivot Language for Automatic Translation of Low-Resource Languages. ACM Transactions on Asian Language Information Processing (TALIP), 12(4):1-17.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Neural Machine Translation of Rare Words with Subword Units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural Machine Translation of Rare Words with Subword Units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Leveraging Monolingual Data with Self-Supervision for Multilingual Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Siddhant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Bapna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mia", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sneha", |
|
"middle": [], |
|
"last": "Kudugunta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naveen", |
|
"middle": [], |
|
"last": "Arivazhagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Siddhant, Ankur Bapna, Yuan Cao, Orhan Firat, Mia Chen, Sneha Kudugunta, Naveen Arivazhagan, and Yonghui Wu. 2020. Leveraging Monolingual Data with Self-Supervision for Multilingual Neural Machine Translation. In Proceedings of the 2020", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Annual Conference of the Association for Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2827--2835", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Conference of the Association for Compu- tational Linguistics, pages 2827-2835. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A Study of Translation Edit Rate with Targeted Human Annotation", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Snover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linnea", |
|
"middle": [], |
|
"last": "Micciulla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of association for machine translation in the Americas", |
|
"volume": "200", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and John Makhoul. 2006. A Study of Translation Edit Rate with Targeted Human An- notation. In Proceedings of association for ma- chine translation in the Americas, volume 200. Cam- bridge, MA.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "BEER: BEtter Evaluation as Ranking", |
|
"authors": [ |
|
{ |
|
"first": "Milo\u0161", |
|
"middle": [], |
|
"last": "Stanojevi\u0107", |
|
"suffix": "" |
|
},

{

"first": "Khalil",

"middle": [],

"last": "Sima'an",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "414--419", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Milo\u0161 Stanojevi\u0107 and Khalil Sima'an. 2014. BEER: BEtter Evaluation as Ranking. In Proceedings of the Ninth Workshop on Statistical Machine Translation, pages 414-419.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Knowledge Distillation for Multilingual Unsupervised Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Haipeng", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kehai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masao", |
|
"middle": [], |
|
"last": "Utiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eiichiro", |
|
"middle": [], |
|
"last": "Sumita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tiejun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3525--3535", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.324" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haipeng Sun, Rui Wang, Kehai Chen, Masao Utiyama, Eiichiro Sumita, and Tiejun Zhao. 2020. Knowledge Distillation for Multilingual Unsupervised Neural Machine Translation. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 3525-3535. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Attention Is All You Need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention Is All You Need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Neural Machine Translation with Byte-Level Subwords", |
|
"authors": [ |
|
{ |
|
"first": "Changhan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "The Thirty-Second Innovative Applications of Artificial Intelligence Conference", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "9154--9160", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Changhan Wang, Kyunghyun Cho, and Jiatao Gu. 2020. Neural Machine Translation with Byte-Level Sub- words. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty- Second Innovative Applications of Artificial Intelli- gence Conference, IAAI 2020, The Tenth AAAI Sym- posium on Educational Advances in Artificial Intel- ligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pages 9154-9160. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "CharacTer: Translation Edit Rate on Character Level", |
|
"authors": [ |
|
{ |
|
"first": "Weiyue", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan-Thorsten", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Rosendahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "505--510", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weiyue Wang, Jan-Thorsten Peter, Hendrik Rosendahl, and Hermann Ney. 2016. CharacTer: Translation Edit Rate on Character Level. In Proceedings of the First Conference on Machine Translation: Volume 2, Shared Task Papers, pages 505-510.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Generative Neural Machine Translation", |
|
"authors": [], |
|
"year": null, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Generative Neural Machine Translation. In Interna- tional Conference on Learning Representations.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "), unsupervised learning(Sun et al. (2020)), data augmentation(Siddhant et al. (2020)), transfer learning (Aji (2020)), meta-learning(Li et al. (2020)), pivot-based(Kim et al. (2019)), and multilingual machine translation(Dabre et al. (2020)).", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"text": "Obtained scores of different metrics on the test set, provided by the task organizers", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Pair</td><td colspan=\"5\">BLEU BLEU cased TER BEER2.0 CharacTER</td></tr><tr><td>HSB-GER</td><td>48.4</td><td>47.9</td><td>0.383</td><td>0.706</td><td>0.335</td></tr><tr><td>GER-HSB</td><td>46.5</td><td>45.9</td><td>0.389</td><td>0.696</td><td>0.323</td></tr><tr><td/><td/><td/><td>Valid</td><td/><td>Test</td></tr><tr><td>Vocab</td><td>Pair</td><td colspan=\"4\">Checkpoint Checkpoint Checkpoint</td></tr><tr><td/><td/><td>(last)</td><td/><td>(best)</td><td>(best)</td></tr><tr><td>2048</td><td>HSB-GER GER-HSB</td><td>45.92 46.62</td><td/><td>45.19 46.35</td><td>48.4 46.5</td></tr><tr><td>4096</td><td>HSB-GER GER-HSB</td><td>45.77 46.96</td><td/><td>45.09 46.24</td><td>--</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "Effect on BLEU by increasing vocabulary size", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |