|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:41:38.909356Z" |
|
}, |
|
"title": "CUNI Systems for the Unsupervised and Very Low Resource Translation Task in WMT20", |
|
"authors": [ |
|
{ |
|
"first": "Ivana", |
|
"middle": [], |
|
"last": "Kvapil\u00edkov\u00e1", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"postCode": "118 00", |
|
"settlement": "Prague", |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kocmi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"postCode": "118 00", |
|
"settlement": "Prague", |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"postCode": "118 00", |
|
"settlement": "Prague", |
|
"country": "Czech Republic" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper presents a description of CUNI systems submitted to the WMT20 task on unsupervised and very low-resource supervised machine translation between German and Upper Sorbian. We experimented with training on synthetic data and pre-training on a related language pair. In the fully unsupervised scenario, we achieved 25.5 and 23.7 BLEU translating from and into Upper Sorbian, respectively. Our low-resource systems relied on transfer learning from German-Czech parallel data and achieved 57.4 BLEU and 56.1 BLEU, which is an improvement of 10 BLEU points over the baseline trained only on the available small German-Upper Sorbian parallel corpus.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper presents a description of CUNI systems submitted to the WMT20 task on unsupervised and very low-resource supervised machine translation between German and Upper Sorbian. We experimented with training on synthetic data and pre-training on a related language pair. In the fully unsupervised scenario, we achieved 25.5 and 23.7 BLEU translating from and into Upper Sorbian, respectively. Our low-resource systems relied on transfer learning from German-Czech parallel data and achieved 57.4 BLEU and 56.1 BLEU, which is an improvement of 10 BLEU points over the baseline trained only on the available small German-Upper Sorbian parallel corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "An extensive area of the machine translation (MT) research focuses on training translation systems without large parallel data resources (Artetxe et al., 2018b (Artetxe et al., ,a, 2019 Lample et al., 2018a,b) . The WMT20 translation competition presents a separate task on unsupervised and very low-resource supervised MT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 159, |
|
"text": "(Artetxe et al., 2018b", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 185, |
|
"text": "(Artetxe et al., ,a, 2019", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 209, |
|
"text": "Lample et al., 2018a,b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The organizers prepared a shared task to explore machine translation on a real-life example of a lowresource language pair of German (de) and Upper Sorbian (hsb). There are around 60k authentic parallel sentences available for this language pair which is not sufficient to train a high-quality MT system in a standard supervised way, and calls for unsupervised pre-training (Conneau and Lample, 2019) , data augmentation by synthetically produced sentences (Sennrich et al., 2016a) or transfer learning from different language pairs (Zoph et al., 2016a; Kocmi and Bojar, 2018 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 374, |
|
"end": 400, |
|
"text": "(Conneau and Lample, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 481, |
|
"text": "(Sennrich et al., 2016a)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 553, |
|
"text": "(Zoph et al., 2016a;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 575, |
|
"text": "Kocmi and Bojar, 2018", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The WMT20 shared task is divided into two tracks. In the unsupervised track, the participants are only allowed to use monolingual German and Upper Sorbian corpora to train their systems; the low-resource track permits the usage of auxiliary parallel corpora in other languages as well as a small parallel corpus in German-Upper Sorbian.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We participate in both tracks in both translation directions. Section 2 describes our participation in the unsupervised track and section 3 describes our systems from the low-resource track. Section 4 introduces transfer learning via Czech (cs) into our low-resource system. We conclude the paper in section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Unsupervised machine translation is the task of learning to translate without any parallel data resources at training time. Both neural and phrasebased systems were proposed to solve the task (Lample et al., 2018b) . In this work, we train several neural systems and compare the effects of different training approaches.", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 214, |
|
"text": "(Lample et al., 2018b)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised MT", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The key concepts of unsupervised NMT include a shared encoder, shared vocabulary and model initialization (pre-training). The training relies only on monolingual corpora and switches between de-noising, where the model learns to reconstruct corrupted sentences, and online back-translation, where the model first translates a batch of sentences and immediately trains itself on the generated sentence pairs, using the standard cross-entropy MT objective (Artetxe et al., 2018b; Lample et al., 2018a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 454, |
|
"end": 477, |
|
"text": "(Artetxe et al., 2018b;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 499, |
|
"text": "Lample et al., 2018a)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We use a 6-layer Transformer architecture for our unsupervised NMT models following the approach by Conneau and Lample (2019) . Both the encoder and the decoder are shared across languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 125, |
|
"text": "Conneau and Lample (2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We first pre-train the encoder and the decoder separately on the task of cross-lingual masked language modelling (XLM) using monolingual Figure 1 : An overview of selected CUNI systems. Corpora are illustrated in gray boxes, system names in black boxes. Systems are trained with indicated training objectives: cross-lingual masked language modeling (XLM), denoising (DN), online back-translation (BT), and standard machine translation objective (MT). Monolingual training sets DE mono and HSB mono were available for both WMT20 task tracks, the parallel training set HSB\u2194DE auth was only allowed in the low-resource supervised track. data only (Conneau and Lample, 2019) . Subsequently, the initialized MT system (CUNI-Monolingual) is trained using de-noising and online back-translation. We then use this system to translate our entire monolingual corpus and train a new system (CUNI-Synthetic-I) from scratch on the two newly generated synthetic parallel corpora DE-HSB synth1 and HSB-DE synth1. Finally, we use the new system to generate DE-HSB synth2 and HSB-DE synth2, and repeat the training to evaluate the effect of another back-translation round (CUNI-Synthetic-II).", |
|
"cite_spans": [ |
|
{ |
|
"start": 644, |
|
"end": 670, |
|
"text": "(Conneau and Lample, 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 145, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "All unsupervised systems are trained using the same BPE subword vocabulary (Sennrich et al., 2016b) with 61k items generated using fastBPE. 1 An overview of the systems and their training stages is given in fig. 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 99, |
|
"text": "(Sennrich et al., 2016b)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 213, |
|
"text": "fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Our de training data comes from News Crawl; the hsb data was provided for WMT20 by the Sorbian Institute and the Witaj Sprachzentrum. 2 Most of the hsb data was of high quality but we fed the web-scraped corpus (web monolingual.hsb) through a language identification tool fastText 3 to identify proper hsb sentences. All de data was also cleaned using this tool.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The final monolingual training corpora have 1 https://github.com/glample/fastBPE 2 http://www.statmt.org/wmt20/unsup_ and_very_low_res/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "3 https://github.com/facebookresearch/ fastText/ 22.5M sentences (DE mono) and 0.6M sentences (HSB mono). Synthetic parallel corpora are generated from the monolingual data sets by coupling the sentences with their translation counterparts as described in section 2.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The parallel development (dev) and testing (dev test) data sets of 2k sentence pairs provided by WMT20 organizers are used for parameter tuning and model selection. The final evaluation is run on the blind test set newstest2020.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The resulting scores measured on the blind new-stest2020 are listed in table 1 and table 2. The translation quality metrics BLEU (Papineni et al., 2002) , TER (Snover et al., 2006) , BEER (Stanojevi\u0107 and Sima'an, 2014) and CharacTER (Wang et al., 2016) provide consistent results. The best quality is reached when using synthetic corpora from the second back-translation iteration, although the second round adds only a slight improvement. A similar observation is made by Hoang et al. (2018) who show that the second round of back-translation does not enhance the system performance as much as the first round. Additionally, the third round does not produce any significant gains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 152, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 159, |
|
"end": 180, |
|
"text": "(Snover et al., 2006)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 218, |
|
"text": "(Stanojevi\u0107 and Sima'an, 2014)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 252, |
|
"text": "(Wang et al., 2016)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "When training on synthetic parallel corpora, it is still beneficial to perform back-translation on-thefly (Artetxe et al., 2018b) whereby new training instances of increasing quality are generated in every training step. This method adds 1 -2 BLEU points to the final score as compared to training only on sentence pairs from the two synthetic corpora so we use it in all our unsupervised systems. We used the XLM 4 toolkit for running the experiments. Language model pre-training took 4 days on 4 GPUs 5 . The translation models were trained on 1 GPU 6 with 8-step gradient accumulation to reach an effective batch size of 8 \u00d7 3400 tokens. We used the Adam (Kingma and Ba, 2015) optimizer with inverse square root decay (\u03b2 1 = 0.9, \u03b2 2 = 0.98, lr = 0.0001) and greedy decoding.", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 129, |
|
"text": "(Artetxe et al., 2018b)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "3 Very Low-Resource Supervised MT", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Our systems introduced in this section have the same model architecture as described in section 2, but now we allow the usage of authentic parallel data. We pre-train a bilingual XLM model and finetune with either only authentic parallel data (CUNI-Auth-w\\o-BT) or both parallel and monolingual data, using a combination of standard MT training and online back-translation (CUNI-Auth-w\\-BT). Finally, we utilize the trained model CUNI-Synthetic-II from section 2 and fine-tune it on the authentic parallel corpus, again using standard supervised training as well as online back-translation 4 https://github.com/facebookresearch/ XLM 5 GeForce GTX 1080, 11GB of RAM 6 Quadro P5000, 16GB of RAM (CUNI-Synth+Authentic). All systems are trained with the same BPE subword vocabulary of 61k items.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In addition to the data described in section 2.2, we used the authentic parallel corpus of 60k sentence pairs provided by Witaj Sprachzentrum mostly from the legal and general domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The resulting scores are listed in the second part of table 1 and table 2. We compare system performance against a supervised baseline which is a vanilla NMT model trained only on the small parallel train set of 60k sentences, without any pretraining or data augmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Our best system gains 11.5 BLEU over this baseline, utilizing the larger monolingual corpora for XLM pre-training and online back-translation. Fine-tuning one of the trained unsupervised systems on parallel data leads to a lower gain of \u223c10 BLEU points over the baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The translation models were trained on 1 GPU 7 with 8-step gradient accumulation to reach an effective batch size of 8 \u00d7 1600 tokens. Other training details are equivalent to section 2.1. Table 4 : Translation quality of de \u2192 hsb systems on newstest2020.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 195, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "One of the main approaches to improving performance under low-resource conditions is transferring knowledge from different high-resource language pairs (Zoph et al., 2016b; Kocmi and Bojar, 2018) . This section describes the unmodified strategy for transfer learning as presented by Kocmi and Bojar (2018) , using German-Czech as the parent language pair. Since we do not modify the approach nor tune hyperparameters of the NMT model, we consider our system a transfer learning baseline for low-resource supervised machine translation. Kocmi and Bojar (2018) proposed an approach to fine-tune a low-resource language pair (called \"child\") from a pre-trained high-resource language pair (called \"parent\") model. The method has only one restriction and that is a shared subword vocabulary generated from the corpora of both the child and the parent. The training procedure is as follows: first train an NMT model on the parent parallel corpus until it converges, then replace the training data with the child corpus. We use the Tensor2Tensor framework (Vaswani et al., 2018) for our transfer learning baseline and model parameters \"Transformer-big\" as described in (Vaswani et al., 2018) . Our shared vocabulary has 32k wordpiece tokens. We use the Adafactor (Shazeer and Stern, 2018) optimizer and a reverse square root decay with 16 000 warm-up steps. For the inference, we use beam search of size 8 and alpha 0.8.", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 172, |
|
"text": "(Zoph et al., 2016b;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 195, |
|
"text": "Kocmi and Bojar, 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 283, |
|
"end": 305, |
|
"text": "Kocmi and Bojar (2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 536, |
|
"end": 558, |
|
"text": "Kocmi and Bojar (2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1050, |
|
"end": 1072, |
|
"text": "(Vaswani et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1163, |
|
"end": 1185, |
|
"text": "(Vaswani et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1257, |
|
"end": 1282, |
|
"text": "(Shazeer and Stern, 2018)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Very Low-Resource Supervised MT with Transfer Learning", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In addition to the data described in section 3.2, we used the cs-de parallel corpora available at the OPUS 8 website: OpenSubtitles, MultiParaCrawl, Europarl, EUBookshop, DGT, EMEA and JRC. The cs-de corpus has 21.4M sentence pairs after cleaning with the fastText language identification tool.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We compare the results of our transfer learning baseline called CUNI-Transfer with three top performing systems of WMT20. These systems use state-of-the-art techniques such as BPE-dropout, ensembling of models, cross-lingual language modelling, filtering of training data and hyperparameter tuning. Additionally, we also include results for a system we trained without any modifications solely on bilingual parallel data (Bilingual only). 9", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The results in table 4 show that training solely on German-Upper Sorbian parallel data leads to a performance of 47.8 BLEU for de\u2192hsb and 46.7 BLEU for hsb\u2192de. When using transfer learning with a Czech-German parent, the performance increases by roughly 10 BLEU points to 57.4 and 56.1 BLEU. As demonstrated by the winning system, the performance can be further boosted using additional techniques and approaches to 60.0 and 61.1 BLEU. This shows that transfer learning plays an important role in the low-resource scenario.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We participated in the unsupervised and lowresource supervised translation task of WMT20.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the fully unsupervised scenario, the best scores of 25.5 (hsb\u2192de) and 23.7 (de\u2192hsb) were achieved using cross-lingual language model pre-training (XLM) and training on synthetic data produced by NMT models from earlier two iterations. We submitted this system under the name CUNI-Synthetic-II.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the low-resource supervised scenario, the best scores of 57.4 (hsb\u2192de) and 56.1 (de\u2192hsb) were achieved by pre-training on a large German-Czech parallel corpus and fine-tuning on the available German-Upper Sorbian parallel corpus. We submitted this system under the name CUNI-Transfer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We showed that transfer learning plays an important role in the low-resource scenario, bringing an improvement of \u223c10 BLEU points over a vanilla supervised MT model trained on the small parallel data only. Additional techniques used by other competing teams yield further improvements of up to 4 BLEU over our transfer learning baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
|
{ |
|
"text": "http://opus.nlpl.eu/ 9 The model Bilingual only is trained on the same data as CUNI-Supervised-Baseline but uses a different architecture and decoding parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This study was supported in parts by the grants 19-26934X and 18-24210S of the Czech Science Foundation, SVV 260 575 and GAUK 1050119 of the Charles University. This work has been using language resources and tools stored and distributed by the LINDAT/CLARIN project of the Ministry of Education, Youth and Sports of the Czech Republic (LM2018101).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Unsupervised statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018a. Unsupervised statistical machine transla- tion. In Proceedings of the 2018 Conference on EMNLP, Brussels. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "An effective approach to unsupervised machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the ACL, Florence. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1019" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2019. An effective approach to unsupervised machine translation. In Proceedings of the 57th Annual Meet- ing of the ACL, Florence. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Unsupervised neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Sixth International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2018b. Unsupervised neural ma- chine translation. In Proceedings of the Sixth Inter- national Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Crosslingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "7059--7069", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau and Guillaume Lample. 2019. Cross- lingual language model pretraining. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d' Alch\u00e9-Buc, E. Fox, and R. Garnett, editors, Advances in Neu- ral Information Processing Systems 32, pages 7059- 7069. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Iterative backtranslation for neural machine translation", |
|
"authors": [ |
|
{

"first": "Vu Cong Duy",

"middle": [],

"last": "Hoang",

"suffix": ""

},

{

"first": "Philipp",

"middle": [],

"last": "Koehn",

"suffix": ""

},

{

"first": "Gholamreza",

"middle": [],

"last": "Haffari",

"suffix": ""

},

{

"first": "Trevor",

"middle": [],

"last": "Cohn",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2nd Workshop on Neural Machine Translation and Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vu Cong Duy Hoang, Philipp Koehn, Gholamreza Haffari, and Trevor Cohn. 2018. Iterative back- translation for neural machine translation. In Pro- ceedings of the 2nd Workshop on Neural Machine Translation and Generation.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{

"first": "Diederik",

"middle": ["P."],

"last": "Kingma",

"suffix": ""

},

{

"first": "Jimmy",

"middle": [],

"last": "Ba",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 3rd International Conference for Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In Proceedings of the 3rd International Conference for Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Trivial transfer learning for low-resource neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kocmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6325" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Kocmi and Ond\u0159ej Bojar. 2018. Trivial transfer learning for low-resource neural machine translation. In Proceedings of the Third Conference on Machine Translation: Research Papers, Brussels. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Unsupervised machine translation using monolingual corpora only", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Denoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "6th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2018a. Unsupervised machine translation using monolingual corpora only. In 6th International Conference on Learning Representations (ICLR 2018).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Phrase-based & neural unsupervised machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Denoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Lu- dovic Denoyer, and Marc'Aurelio Ranzato. 2018b. Phrase-based & neural unsupervised machine trans- lation. In Proceedings of the 2018 Conference on EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of 40th", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic evalu- ation of machine translation. In Proceedings of 40th", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Annual Meeting of the ACL, Philadelphia. Association for Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annual Meeting of the ACL, Philadelphia. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Improving neural machine translation models with monolingual data", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the ACL", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016a. Improving neural machine translation mod- els with monolingual data. In Proceedings of the 54th Annual Meeting of the ACL (Volume 1: Long Papers), Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016b. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the ACL, Berlin. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Adafactor: Adaptive learning rates with sublinear memory cost", |
|
"authors": [ |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Stern", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 35th International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Noam Shazeer and Mitchell Stern. 2018. Adafactor: Adaptive learning rates with sublinear memory cost. In Proceedings of the 35th International Conference on Machine Learning, ICML 2018.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A study of translation edit rate with targeted human annotation", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Snover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linnea", |
|
"middle": [], |
|
"last": "Micciulla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 7th of the AMTA, Cambridge. Association for Machine Translation in the Americas", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and John Makhoul. 2006. A study of translation edit rate with targeted human annotation. In Proceedings of the 7th of the AMTA, Cambridge. Association for Machine Translation in the Ameri- cas.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Fitting sentence level translation evaluation with many dense features", |
|
"authors": [ |
|
{ |
|
"first": "Milo\u0161", |
|
"middle": [], |
|
"last": "Stanojevi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Sima'an", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1025" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Milo\u0161 Stanojevi\u0107 and Khalil Sima'an. 2014. Fit- ting sentence level translation evaluation with many dense features. In Proceedings of the 2014 Confer- ence on EMNLP, Doha, Qatar. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Tensor2tensor for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Brevdo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francois", |
|
"middle": [], |
|
"last": "Chollet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Gouws", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nal", |
|
"middle": [], |
|
"last": "Kalchbrenner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Sepassi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 13th Conference of the AMTA", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Samy Bengio, Eugene Brevdo, Fran- cois Chollet, Aidan Gomez, Stephan Gouws, Llion Jones, Lukasz Kaiser, Nal Kalchbrenner, Niki Par- mar, Ryan Sepassi, Noam Shazeer, and Jakob Uszko- reit. 2018. Tensor2tensor for neural machine trans- lation. In Proceedings of the 13th Conference of the AMTA (Volume 1: Research Papers), Boston, MA. Association for Machine Translation in the Ameri- cas.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "CharacTer: Translation edit rate on character level", |
|
"authors": [ |
|
{ |
|
"first": "Weiyue", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan-Thorsten", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendrik", |
|
"middle": [], |
|
"last": "Rosendahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W16-2342" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weiyue Wang, Jan-Thorsten Peter, Hendrik Rosendahl, and Hermann Ney. 2016. CharacTer: Translation edit rate on character level. In Proceedings of the First Conference on Machine Translation: Volume 2, Shared Task Papers, Berlin, Germany. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Transfer learning for low-resource neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Barret", |
|
"middle": [], |
|
"last": "Zoph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1163" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barret Zoph, Deniz Yuret, Jonathan May, and Kevin Knight. 2016a. Transfer learning for low-resource neural machine translation. In Proceedings of the 2016 Conference on EMNLP, Austin, Texas. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Transfer learning for low-resource neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Barret", |
|
"middle": [], |
|
"last": "Zoph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barret Zoph, Deniz Yuret, Jonathan May, and Kevin Knight. 2016b. Transfer learning for low-resource neural machine translation. In Proceedings of the 2016 Conference on EMNLP, Austin, Texas. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>newstest2020</td><td>dev test set</td></tr></table>", |
|
"text": "Translation quality of the unsupervised (a) and low-resource supervised (b) hsb \u2192 de systems on newstest2020 and the unofficial test set. The asterisk * indicates systems submitted into WMT20.", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>System Name</td><td colspan=\"5\">BLEU BLEU-cased TER BEER 2.0 CharacTER</td></tr><tr><td>SJTU-NICT</td><td>61.1</td><td>60.7</td><td>0.283</td><td>0.759</td><td>0.250</td></tr><tr><td>Helsinki-NLP</td><td>58.4</td><td>57.9</td><td>0.297</td><td>0.755</td><td>0.255</td></tr><tr><td>NRC-CNRC</td><td>57.7</td><td>57.3</td><td>0.300</td><td>0.754</td><td>0.255</td></tr><tr><td>CUNI-Transfer</td><td>56.1</td><td>55.5</td><td>0.315</td><td>0.743</td><td>0.265</td></tr><tr><td>Bilingual only</td><td>46.8</td><td>46.4</td><td>0.389</td><td>0.692</td><td>0.335</td></tr></table>", |
|
"text": "Translation quality of hsb \u2192 de systems on newstest2020.", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |