|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T04:34:01.914359Z" |
|
}, |
|
"title": "An Error-based Investigation of Statistical and Neural Machine Translation Performance on Hindi-to-Tamil and English-to-Tamil", |
|
"authors": [ |
|
{ |
|
"first": "Akshai", |
|
"middle": [], |
|
"last": "Ramesh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Dublin City University", |
|
"location": { |
|
"settlement": "Dublin", |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Venkatesh", |
|
"middle": [ |
|
"Balavadhani" |
|
], |
|
"last": "Parthasarathy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Dublin City University", |
|
"location": { |
|
"settlement": "Dublin", |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rejwanul", |
|
"middle": [], |
|
"last": "Haque", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Dublin City University", |
|
"location": { |
|
"settlement": "Dublin", |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Dublin City University", |
|
"location": { |
|
"settlement": "Dublin", |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Statistical machine translation (SMT) was the state-of-the-art in machine translation (MT) research for more than two decades, but has since been superseded by neural MT (NMT). Despite producing state-of-the-art results in many translation tasks, neural models underperform in resource-poor scenarios. Despite some success, none of the present-day benchmarks that have tried to overcome this problem can be regarded as a universal solution to the problem of translation of many low-resource languages. In this work, we investigate the performance of phrasebased SMT (PB-SMT) and NMT on two rarelytested low-resource language-pairs, English-to-Tamil and Hindi-to-Tamil, taking a specialised data domain (software localisation) into consideration. This paper demonstrates our findings including the identification of several issues of the current neural approaches to low-resource domain-specific text translation.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Statistical machine translation (SMT) was the state-of-the-art in machine translation (MT) research for more than two decades, but has since been superseded by neural MT (NMT). Despite producing state-of-the-art results in many translation tasks, neural models underperform in resource-poor scenarios. Despite some success, none of the present-day benchmarks that have tried to overcome this problem can be regarded as a universal solution to the problem of translation of many low-resource languages. In this work, we investigate the performance of phrasebased SMT (PB-SMT) and NMT on two rarelytested low-resource language-pairs, English-to-Tamil and Hindi-to-Tamil, taking a specialised data domain (software localisation) into consideration. This paper demonstrates our findings including the identification of several issues of the current neural approaches to low-resource domain-specific text translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In recent years, MT researchers have proposed approaches to counter the data sparsity problem and to improve the performance of NMT systems in lowresource scenarios, e.g. augmenting training data from source and/or target monolingual corpora (Sennrich et al., 2016a; Chen et al., 2019) , unsupervised learning strategies in the absence of labeled data (Artetxe et al., 2018; Lample et al., 2018) , exploiting training data involving other languages (Firat et al., 2017; Johnson et al., 2017) , multi-task learning (Niehues and Cho, 2017) , selection of hyperparameters (Sennrich and Zhang, 2019) , and pre-trained language model fine-tuning (Liu et al., 2020) . Despite some success, none of the existing benchmarks can be viewed as an overall solution as far as MT for low-resource language-pairs is concerned. For examples, the back-translation strategy of Sennrich et al. (2016a) is less effective in low-resource settings where it is hard to train a good back-translation model (Currey et al., 2017) ; unsupervised MT does not work well for distant languages (Marie and Fujita, 2018) due to the difficulty of training unsupervised cross-lingual word embeddings for such languages (S\u00f8gaard et al., 2018) and the same is applicable in the case of transfer learning too (Montoya et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 242, |
|
"end": 266, |
|
"text": "(Sennrich et al., 2016a;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 285, |
|
"text": "Chen et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 374, |
|
"text": "(Artetxe et al., 2018;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 395, |
|
"text": "Lample et al., 2018)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 449, |
|
"end": 469, |
|
"text": "(Firat et al., 2017;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 491, |
|
"text": "Johnson et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 514, |
|
"end": 537, |
|
"text": "(Niehues and Cho, 2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 595, |
|
"text": "(Sennrich and Zhang, 2019)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 641, |
|
"end": 659, |
|
"text": "(Liu et al., 2020)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 859, |
|
"end": 882, |
|
"text": "Sennrich et al. (2016a)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 982, |
|
"end": 1003, |
|
"text": "(Currey et al., 2017)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1063, |
|
"end": 1087, |
|
"text": "(Marie and Fujita, 2018)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 1184, |
|
"end": 1206, |
|
"text": "(S\u00f8gaard et al., 2018)", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 1271, |
|
"end": 1293, |
|
"text": "(Montoya et al., 2019)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To this end, we investigate the performance of PB-SMT and NMT systems on two rarely-tested under-resourced language-pairs, English-to-Tamil and Hindi-to-Tamil, taking a specialised data domain (software localisation) into account. In this context, in Ramesh et al. (2020) , we investigated the performance of PB-SMT, NMT and a commercial MT system (Google Translate (GT)) 1 on English-to-Tamil taking the software localisation data into account, i.e. the same data as the one used in this work. In particular, in Ramesh et al. (2020) , we produced rankings of the MT systems (PB-SMT, NMT and GT) via a social media platform-based human evaluation scheme, and demonstrate our findings in this lowresource domain-specific text translation task. The next section talks about some of the papers that compared PB-SMT and NMT on a variety of use-cases.", |
|
"cite_spans": [ |
|
{ |
|
"start": 251, |
|
"end": 271, |
|
"text": "Ramesh et al. (2020)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 513, |
|
"end": 533, |
|
"text": "Ramesh et al. (2020)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The remainder of the paper is organized as follows. In Section 2, we discuss related work. Section 3 explains the experimental setup including the descriptions of our MT systems and details of the data sets used. Section 4 presents the results with discussions and analysis, while Section 5 concludes our work with avenues for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The advent of NMT in MT research has led researchers to investigate how NMT is better (or worse) than PB-SMT. This section presents some of the papers that compared PB-SMT and NMT on a variety of use-cases. Although our primary objective of this work is to study translations of the MT systems (PB-SMT and NMT) in under-resourced conditions, we provide a brief overview on some of the papers that compared PB-SMT and NMT on highresource settings too.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Junczys-Dowmunt et al. (2016) compare PB-SMT and NMT on a range of translation-pairs and show that for all translation directions NMT is either on par with or surpasses PB-SMT. Bentivogli et al. (2016) analyse the output of MT systems in an Englishto-German translation task by considering different linguistic categories. conduct an evaluation to compare NMT and PB-SMT outputs across broader aspects (e.g. fluency, reordering) for 9 language directions. Castilho et al. (2017) conduct an extensive qualitative and quantitative comparative evaluation of PB-SMT and NMT using automatic metrics and professional translators. Popovi\u0107 (2017) carries out an extensive comparison between NMT and PB-SMT languagerelated issues for the German-English language pair in both translation directions. These works (Bentivogli et al., 2016; Castilho et al., 2017; Popovi\u0107, 2017; show that NMT provides better translation quality than the previous state-of-the-art PB-SMT. This trend continues in other studies and use-cases: translation of literary text (Toral and Way, 2018) , MT post-editing setups (Specia et al., 2017) , industrial setups (Shterionov et al., 2017) , translation of patent documents (Long et al., 2016; Kinoshita et al., 2017) , less-explored language pairs (Klubi\u010dka et al., 2017 (Klubi\u010dka et al., , 2018 , highly investigated \"easy\" translation pairs (Isabelle et al., 2017) , and translation of catalogues of technical tools (Beyer et al., 2017) . An opposite picture is also seen in the case of translation of the domain text; Nunez et al. (2019) showed PB-SMT outperforms NMT when translating user-generated content.", |
|
"cite_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 201, |
|
"text": "Bentivogli et al. (2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 456, |
|
"end": 478, |
|
"text": "Castilho et al. (2017)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 638, |
|
"text": "Popovi\u0107 (2017)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 802, |
|
"end": 827, |
|
"text": "(Bentivogli et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 828, |
|
"end": 850, |
|
"text": "Castilho et al., 2017;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 851, |
|
"end": 865, |
|
"text": "Popovi\u0107, 2017;", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 1041, |
|
"end": 1062, |
|
"text": "(Toral and Way, 2018)", |
|
"ref_id": "BIBREF58" |
|
}, |
|
{ |
|
"start": 1088, |
|
"end": 1109, |
|
"text": "(Specia et al., 2017)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 1130, |
|
"end": 1155, |
|
"text": "(Shterionov et al., 2017)", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 1190, |
|
"end": 1209, |
|
"text": "(Long et al., 2016;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 1210, |
|
"end": 1233, |
|
"text": "Kinoshita et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1265, |
|
"end": 1287, |
|
"text": "(Klubi\u010dka et al., 2017", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1288, |
|
"end": 1312, |
|
"text": "(Klubi\u010dka et al., , 2018", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1360, |
|
"end": 1383, |
|
"text": "(Isabelle et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1435, |
|
"end": 1455, |
|
"text": "(Beyer et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1538, |
|
"end": 1557, |
|
"text": "Nunez et al. (2019)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The MT researchers have tested and compared PB-SMT and NMT in the resource-poor settings too. Koehn and Knowles (2017) , \u00d6stling and Tiedemann (2017) , and Dowling et al. (2018) found that PB-SMT can provide better translations than NMT in low-resource scenarios. In contrast to these findings, however, many studies have demonstrated that NMT is better than PB-SMT in low-resource situations (Casas et al., 2019; Sennrich and Zhang, 2019) . Hence, the findings of this line of MT research have yielded indeed a mixed bag of results, where way ahead unclear. This work investigates translations of a software localisation text with two low-resource translation-pairs, Hindi-to-Tamil and English-to-Tamil, taking two MT paradigms, PB-SMT and NMT, into account.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 118, |
|
"text": "Koehn and Knowles (2017)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 149, |
|
"text": "\u00d6stling and Tiedemann (2017)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 177, |
|
"text": "Dowling et al. (2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 413, |
|
"text": "(Casas et al., 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 414, |
|
"end": 439, |
|
"text": "Sennrich and Zhang, 2019)", |
|
"ref_id": "BIBREF50" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To build our PB-SMT systems we used the Moses toolkit (Koehn et al., 2007) . We used a 5-gram language model trained with modified Kneser-Ney smoothing (Kneser and Ney, 1995) . Our PB-SMT log-linear features include: (a) 4 translational features (forward and backward phrase and lexical probabilities), (b) 8 lexicalised reordering probabilities (wbe-mslr-bidirectional-fe-allff ), (c) 5-gram LM probabilities, (d) 5 OSM features (Durrani et al., 2011) , and (e) word-count and distortion penalties. The weights of the parameters are optimized using the margin-infused relaxed algorithm (Cherry and Foster, 2012) on the development set. For decoding, the cube-pruning algorithm (Huang and Chiang, 2007) is applied, with a distortion limit of 12.", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 74, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 152, |
|
"end": 174, |
|
"text": "(Kneser and Ney, 1995)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 452, |
|
"text": "(Durrani et al., 2011)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 587, |
|
"end": 612, |
|
"text": "(Cherry and Foster, 2012)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 678, |
|
"end": 702, |
|
"text": "(Huang and Chiang, 2007)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The MT systems", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To build our NMT systems, we used the Open-NMT toolkit (Klein et al., 2017) . The NMT systems are Transformer models (Vaswani et al., 2017) . The tokens of the training, evaluation and validation sets are segmented into sub-word units using Byte-Pair Encoding (BPE) (Sennrich et al., 2016b) . Recently, Sennrich and Zhang (2019) demonstrated that commonly used hyper-parameters configuration do not provide the best results in low-resource settings. Accordingly, we carried out a series of experiments in order to find the best hyperparameter configurations for Transformer in our low-resource settings. In particular, we played with some of the hyperparameters, and found that the following configuration lead to the best results in our low-resource translation settings: (i) the BPE vocabulary size: 8,000, (ii) the sizes of encoder and decoder layers: 4 and 6, respectively, (iii) learning-rate: 0.0005, (iv) batch size (token): 4,000, and (v) Transformer head size: 4. As for the remaining hyperparameters, we followed the recommended best set-up from Vaswani et al. (2017) . The validation on development set is performed using three cost functions: cross-entropy, perplexity and BLEU (Papineni et al., 2002) . The early stopping criteria is based on cross-entropy; however, the final NMT system is selected as per highest BLEU score on the validation set. The beam size for search is set to 12.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 75, |
|
"text": "(Klein et al., 2017)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 139, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 266, |
|
"end": 290, |
|
"text": "(Sennrich et al., 2016b)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 328, |
|
"text": "Sennrich and Zhang (2019)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 1056, |
|
"end": 1077, |
|
"text": "Vaswani et al. (2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1190, |
|
"end": 1213, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The MT systems", |
|
"sec_num": "3.1" |
|
}, |
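A minimal sketch of the subword segmentation step described above, assuming the subword-nmt package (the reference implementation of Sennrich et al., 2016b). The file names are hypothetical placeholders; only the 8,000 merge operations mirror the configuration reported in this section, so this is an illustration rather than the authors' exact pipeline.

```python
# Illustrative sketch only: learn 8,000 BPE merge operations on a tokenised
# training corpus and apply them to the train/dev/test splits (Section 3.1).
# Assumes the `subword-nmt` package; file names are hypothetical placeholders.
import codecs
from subword_nmt.learn_bpe import learn_bpe
from subword_nmt.apply_bpe import BPE

NUM_MERGES = 8000  # matches the BPE vocabulary size reported above

# Learn the merge operations from the tokenised target-side training corpus.
with codecs.open("train.ta.tok", encoding="utf-8") as infile, \
     codecs.open("bpe.codes.ta", "w", encoding="utf-8") as outfile:
    learn_bpe(infile, outfile, NUM_MERGES)

# Apply the learned codes to every split used for training and evaluation.
with codecs.open("bpe.codes.ta", encoding="utf-8") as codes:
    bpe = BPE(codes)

for split in ("train", "dev", "test"):
    with codecs.open(f"{split}.ta.tok", encoding="utf-8") as src, \
         codecs.open(f"{split}.ta.bpe", "w", encoding="utf-8") as out:
        for line in src:
            out.write(bpe.process_line(line))
```

The same procedure would be repeated for the source side (English or Hindi) with its own merge table.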
|
{ |
|
"text": "In order to test MT on low-resource scenarios, we chose English and two Indian languages: Hindi, and Tamil. English, Hindi, and Tamil are Germanic, Indo-Aryan and Dravidian languages, respectively, so the languages we selected for investigation are from different language families and morphologically divergent to each other. English is a less inflected language, whereas Hindi and Tamil are morphologically rich and highly inflected languages. Our first investigation is from a less inflected language to a highly inflected language (i.e. Englishto-Tamil), and the second one is between two morphologically complex and inflected languages (i.e. Hindi-to-Tamil). Thus, we compare translation in PB-SMT and NMT with two difficult translationpairs involving three morphologically divergent languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Choice of Languages", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "This section presents our datasets. For experimentation we used data from three different sources: OPUS 2 (Tiedemann, 2012) , WikiMatrix 3 (Schwenk et al., 2019) and PMIndia 4 (Haddow and Kirefu, 2020). As mentioned above, we carried out experiments on two translation-pairs, English-to-Tamil and Hindi-to-Tamil, and study translation of a specialised domain data, i.e. software localisation. Corpus statistics are shown in Table 1 . We carried out experiments using two different setups: (i) in the first setup, the MT systems were built on a training set compiled from all data domains listed above; we call this setup MIXED, and (ii) in the second setup, the MT systems were built on a training set compiled only from different software localisation data from OPUS, viz. GNOME, KDE4 and Ubuntu; we call this setup IT. The development and test set sentences were randomly drawn from these localisation corpora. As can be seen from Table 1 , the number of training set sentences of the Hindi-to-Tamil task is less than half of that of the training set size of the English-to-Tamil task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 123, |
|
"text": "(Tiedemann, 2012)", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 161, |
|
"text": "(Schwenk et al., 2019)", |
|
"ref_id": "BIBREF47" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 424, |
|
"end": 431, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 933, |
|
"end": 940, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Used", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In order to remove noise from the data sets, we adopted the following measures. We observed that the corpora of one language (say, Hindi) contains sentences of other languages (e.g. English), so we use a language identifier 5 in order to remove such noise. Then, we adopted a number of standard cleaning routines for removing noisy sentences, e.g. removing sentence-pairs that are too short, too long or which violate certain sentence-length ratios. In order to perform tokenisation for English, we used the standard tool in the Moses toolkit. For tokenising and normalising Hindi and Tamil sentences, we used the Indic NLP library. 6 Without a doubt, BPE is seen as the benchmark strategy for reducing data sparsity for NMT. We built our NMT engines on both word and subword-level training corpora in order to test BPE's effectiveness on low-resource translation tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Used", |
|
"sec_num": "3.3" |
|
}, |
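A minimal sketch of the cleaning routines described above, assuming the pycld2 bindings to the cld2 language identifier referenced in footnote 5. The length bounds, ratio threshold and file names are illustrative assumptions, not the authors' exact values.

```python
# Illustrative sketch only: drop sentence-pairs whose detected language does not
# match the expected side, or which are too short, too long, or violate a
# length-ratio constraint. Thresholds and file names are hypothetical.
import pycld2 as cld2

MIN_TOKENS, MAX_TOKENS, MAX_RATIO = 1, 100, 3.0

def detected_lang(text):
    """Return the cld2 language code for `text`, or None if detection fails."""
    try:
        is_reliable, _, details = cld2.detect(text)
    except Exception:
        return None
    # `details` is a tuple of (name, code, percent, score) guesses.
    return details[0][1] if is_reliable else None

def keep_pair(src, tgt, src_lang="hi", tgt_lang="ta"):
    src_len, tgt_len = len(src.split()), len(tgt.split())
    if not (MIN_TOKENS <= src_len <= MAX_TOKENS and MIN_TOKENS <= tgt_len <= MAX_TOKENS):
        return False  # too short or too long
    if max(src_len, tgt_len) / max(1, min(src_len, tgt_len)) > MAX_RATIO:
        return False  # violates the sentence-length ratio
    return detected_lang(src) == src_lang and detected_lang(tgt) == tgt_lang

with open("train.hi", encoding="utf-8") as f_src, open("train.ta", encoding="utf-8") as f_tgt, \
     open("clean.hi", "w", encoding="utf-8") as o_src, open("clean.ta", "w", encoding="utf-8") as o_tgt:
    for src, tgt in zip(f_src, f_tgt):
        if keep_pair(src.strip(), tgt.strip()):
            o_src.write(src)
            o_tgt.write(tgt)
```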
|
{ |
|
"text": "We present the comparative performance of the PB-SMT and NMT systems in terms of the widely used automatic evaluation metric BLEU. Additionally, we performed statistical significance tests using bootstrap resampling methods (Koehn, 2004) . Sections 4.1.1 and 4.1.2 present the performance of the MT systems on the MIXED and IT setups, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 224, |
|
"end": 237, |
|
"text": "(Koehn, 2004)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation", |
|
"sec_num": "4.1" |
|
}, |
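A minimal sketch of the paired bootstrap resampling test of Koehn (2004) referred to above, assuming the sacrebleu package for BLEU. The file names and the number of resamples are hypothetical placeholders, not the authors' exact setup.

```python
# Illustrative sketch only: paired bootstrap resampling (Koehn, 2004) comparing
# the BLEU of two systems on the same test set. Assumes `sacrebleu`; file names
# and the number of resamples are hypothetical placeholders.
import random
import sacrebleu

def read_lines(path):
    with open(path, encoding="utf-8") as f:
        return [line.strip() for line in f]

refs = read_lines("test.ta.ref")
sys_a = read_lines("test.ta.pbsmt")  # PB-SMT output
sys_b = read_lines("test.ta.nmt")    # NMT output

def bleu(hyps, references):
    return sacrebleu.corpus_bleu(hyps, [references]).score

n, resamples, wins_a = len(refs), 1000, 0
for _ in range(resamples):
    idx = [random.randrange(n) for _ in range(n)]  # sample sentence indices with replacement
    sample_refs = [refs[i] for i in idx]
    if bleu([sys_a[i] for i in idx], sample_refs) > bleu([sys_b[i] for i in idx], sample_refs):
        wins_a += 1

# If one system wins in more than 95% of the resamples, its BLEU advantage is
# conventionally reported as statistically significant at p < 0.05.
print(f"PB-SMT scores higher in {wins_a / resamples:.1%} of {resamples} resamples")
```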
|
{ |
|
"text": "We show the BLEU scores on the test set in Table 2 . The first and second rows of the table represent the English-to-Tamil and Hindi-to-Tamil translation tasks, respectively. 7 The PB-SMT and NMT systems produce relatively low BLEU scores on the test set given the difficulty of the translation pairs. However, these BLEU scores underestimate the translation quality, given the relatively free word order in Tamil, and the fact that we have just a single reference translation set for evaluation. We see from Ta- ble 2 that PB-SMT surpassed NMT by a large margin in terms of BLEU in both the English-to-Tamil and Hindi-to-Tamil translation tasks, and found that the differences in the BLEU scores are statistically significant.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 51, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The MIXED Setup", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "This section presents the results obtained on the IT setup. The BLEU scores of the MT systems are reported in Table 3 . When we compare the BLEU scores of this table with those of Table 2 , we see a huge rise in terms of the BLEU scores for PB-SMT and NMT as far as English-to-Tamil translation is concerned, and the improvements are found to be statistically significant. As for the Hindi-to-Tamil translation, we see a substantial deterioration in BLEU (an absolute difference of 1.36 points, a 24.9% relative loss in terms of BLEU) for PB-SMT. We found that this loss is statistically significant too. We also see that in this task the BLEU score of the NMT system is nearly identical to the one in the MIXED setup (2.12 BLEU points versus 2.10 BLEU points). As far as the English-to-Tamil translation and the IT setup are concerned, the PB-SMT system outperforms the NMT system statistically significantly, and we see an improvement of an absolute of 6.33 7 For both translation tasks we carried out a number of experiments by augmenting the training data from source and/or target monolingual corpora via forward-and back-translation (Sennrich et al., 2016a; Burlot and Yvon, 2018; Bogoychev and Sennrich, 2019) . We found that adding synthetic data via the forward-translation strategy hurts the MT system's performance, and the back-translation strategy brings about roughly similar BLEU scores.", |
|
"cite_spans": [ |
|
{ |
|
"start": 960, |
|
"end": 961, |
|
"text": "7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1139, |
|
"end": 1163, |
|
"text": "(Sennrich et al., 2016a;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 1164, |
|
"end": 1186, |
|
"text": "Burlot and Yvon, 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1187, |
|
"end": 1216, |
|
"text": "Bogoychev and Sennrich, 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 117, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 187, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The IT Setup", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "points (corresponding to 69.3% relative) in terms of BLEU on the test set. The same trend is seen in the Hindi-to-Tamil translation task too.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The IT Setup", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "We have a number of observations from the results of the MIXED and IT setups. As discussed in Section 3.3, in the IT task, the MT systems were built exclusively on in-domain training data, and in the MIXED setup, the training data is composed of a variety of domains, i.e. religious, IT, political news. Use of in-domain data only in training does not have any positive impact on the Hindi-to-Tamil translation, and we even saw a significant deterioration in performance on BLEU for PB-SMT. We conjecture that the morphological complexity of the languages (Hindi and Tamil) involved in this translation could be one of the reasons why the NMT and PB-SMT systems performed so poorly when trained exclusively on small-sized specialised domain data. When we compare PB-SMT and NMT, we see that PB-SMT is always the leading system in both the following cases: (i) across the training data setups (MIXED and IT) and (ii) the translation-directions (English-to-Tamil and Hindi-to-Tamil).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The IT Setup", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "The BLEU scores reported in the sections above are very low. We looked at the translations of the test set sentences by the MT systems and compare them with the reference translations. We found that despite being good in quality, in many cases the translations were penalised heavily by the BLEU metric as a result of many n-gram mismatches with the corresponding reference translations. This happened mainly due to the nature of target language (Tamil) in question, i.e. Tamil is a free word order language. This is indeed responsible for the increase in nonoverlapping n-gram counts. We also found that translations contain lexical variations of Tamil words of the reference translation, again resulting in the increase of the non-overlapping n-gram counts. We show such translations from the Hindi-to-Tamil task in Table 4 . We also reported this phenomenon in Ramesh et al. (2020) and showed such translations from the English-to-Tamil task (cf. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 864, |
|
"end": 884, |
|
"text": "Ramesh et al. (2020)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 818, |
|
"end": 825, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Reasons for very low BLEU Scores", |
|
"sec_num": "4.2" |
|
}, |
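To make this effect concrete, the toy examples below (English stand-ins, purely for illustration and not drawn from the actual test data) show how a hypothesis with the same words in a different order, or with lexical variants, loses n-gram matches against a single reference; the sacrebleu package is assumed.

```python
# Illustrative toy examples only (English stand-ins, not actual test data):
# word reordering and lexical variants both reduce n-gram overlap with a
# single reference, and hence sentence-level BLEU. Assumes `sacrebleu`.
import sacrebleu

cases = {
    "reordered words":  ("now close the window", "close the window now"),
    "lexical variants": ("store the chosen file", "save the selected file"),
}
for name, (hyp, ref) in cases.items():
    score = sacrebleu.sentence_bleu(hyp, [ref]).score
    print(f"{name:16s} BLEU = {score:5.1f}")

# With a free word-order target language such as Tamil and only one reference
# translation per segment, many acceptable outputs are penalised in this way.
```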
|
{ |
|
"text": "We conducted a thorough error analysis of the English-to-Tamil and Hindi-to-Tamil NMT and PB-SMT systems built on the in-domain training data. For this, we randomly sampled 100 sentences from the respective test sets (English-to-Tamil and Hindi- to-Tamil). The outcome of this analysis is presented in the following sections.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Terminology translation is arguably viewed as one of the most challenging problems in MT (Dinu et al., 2019; Haque et al., 2019; Exel et al., 2020) . Since this work focuses on studying translation of data from a specialised domain, we looked at this area of translation with a special focus. We first looked at the translations of OOV terms in order to see how they are translated into the target. We found that both the NMT systems (English-to- Tamil We show four examples in Table 5 . In the first example, we show a source English sentence and its Tamil translation. We see from the translation that the NMT system drops the source-side terms 'ipod', 'iphone' and 'ipad' in the target translation. The SMT system translates the segment as 'most ipod, iphone'. In the second example, we see that a part ('Open') of a multiword term ('Open script') is cor-rectly translated into Tamil, and the NMT system omits its remaining part ('script') in translation. As for the SMT system, the source text is translated as 'opened script'. In the third example, we show another multiword English term ('Color set') and its Tamil translation (i.e. English equivalent 'set the color') by the NMT system, which is wrong. As for the SMT system, the source text is translated as 'set color'. Here, we see that both the MT systems made correct lexical choices for each word of the source term, although the meaning of the respective translation is different to that of the source term. This can be viewed as a cross-lingual disambiguation problem. In the fourth example, we show a single word source Hindi sentence ('Freecell') which is a term and name of a computer game. The Hindi-to-Tamil NMT system incorrectly translates this term into Tamil, and the English equivalent of the Tamil translation is in fact 'freebugs'. The translation of the fourth segment by the SMT system is its transliteration. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 108, |
|
"text": "(Dinu et al., 2019;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 128, |
|
"text": "Haque et al., 2019;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 129, |
|
"end": 147, |
|
"text": "Exel et al., 2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 447, |
|
"end": 452, |
|
"text": "Tamil", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 485, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Terminology Translation", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "We observed that both NMT systems (English-to-Tamil and Hindi-to-Tamil) often make incorrect lexical selection for polysemous words, i.e. the NMT systems often produce a target translation of a word that has no connection with the underlying context of the source sentence in which the word appears. As an example, we show a Hindi sentence and its Tamil translation in Table 6 . The ambiguous word \u0939\u093e\u0932 ('haal') has three meanings in Hindi ('condition', 'recent' and 'hall') and their Tamil translations are different too. The Hindi-to-Tamil NMT system chooses the Tamil translation for the Hindi word \u0939\u093e\u0932 which is incorrect in the context of the source sentence. As for the SMT system, it translates the source text as \"names of games played recently\". It makes a correct lexical selection for the word in question.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 369, |
|
"end": 376, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Lexical Selection", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "We observed that the NMT systems occasionally commit reordering errors in translation. In Table 7 ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 97, |
|
"text": "Table 7", |
|
"ref_id": "TABREF11" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Wrong Word Order", |
|
"sec_num": "4.3.3" |
|
}, |
|
{ |
|
"text": "Haque et al. (2019) observed that NMT tends to omit more terms in translation than PB-SMT. We found that this is true in our case with non-term entities too as we observed that the NMT systems often omit words in the translations. As an example, in Table 8 , we show an English sentence, its Tamil translations and the English equivalents of the Tamil translations. We see from the table that the NMT system translates only the first word of the English sentence and drops the remainder of the sentence during translation, and the SMT system translates the first two words of the English sentence and drops the remainder of the sentence for translation. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 256, |
|
"text": "Table 8", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Omission", |
|
"sec_num": "4.3.4" |
|
}, |
|
{ |
|
"text": "We report a few more erroneous translations by the Hindi-to-Tamil NMT system in Table 9 . The errors in these translations occur for a variety of reasons. The translations of the source sentences sometimes contain strange words that have no relation to the meaning of the source sentence. The top two example translations belong to this category. The translation of the first sentence by the SMT system is partially correct. As for the second example, the SMT system translates it as 'report' which is incorrect too.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 87, |
|
"text": "Table 9", |
|
"ref_id": "TABREF14" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Miscellaneous Errors", |
|
"sec_num": "4.3.5" |
|
}, |
|
{ |
|
"text": "We also see that the translations occasionally contain repetitions of other translated words. This repetition of words is seen only for the NMT system. The bottom two translation examples of Table 9 belong to this category. These findings are corroborated by some of the studies that pursued this line of research (e.g. Farajian et al. 2017). Unsurprisingly, such erroneous translations are seen more with the Hindito-Tamil translation direction. As for SMT, the MT system translates the third and fourth sentences incorrectly and correctly, respectively. In both cases, unlike NMT, the translations do not contain any repetition of other translated words. We sometimes found the appearance of one or more unexpected words in the translation, which completely changes the meaning of the translation, as shown in Table 10 . However, the SMT system correctly translates the first two source sentences shown in Table 10 . In the case of the third sentence, it translates the source sentence as 'move to trash'.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 191, |
|
"end": 198, |
|
"text": "Table 9", |
|
"ref_id": "TABREF14" |
|
}, |
|
{ |
|
"start": 812, |
|
"end": 820, |
|
"text": "Table 10", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 908, |
|
"end": 916, |
|
"text": "Table 10", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Miscellaneous Errors", |
|
"sec_num": "4.3.5" |
|
}, |
|
{ |
|
"text": "We also observed that the translation-equivalents of some words are in fact the transliterations of the words themselves. We observed this happening only for the Englishto-Tamil direction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Miscellaneous Errors", |
|
"sec_num": "4.3.5" |
|
}, |
|
{ |
|
"text": "For example, the English word 'pixel' has a specific Tamil translation (i.e. \u0baa\u0b9f [pat \u0323attun \u0323ukku] ). However, the NMT system produces a transliterated form of that word in the target translation. In practice, many English words, especially terms or product names, are often directly used in Tamil text. Accordingly, we found the presence of transliterated forms of some words in the Tamil text of the training data. This could be the reason why the NMT systems generates such translations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 98, |
|
"text": "[pat \u0323attun \u0323ukku]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Miscellaneous Errors", |
|
"sec_num": "4.3.5" |
|
}, |
|
{ |
|
"text": "We saw in Section 4.1 that the BPE-based segmentation negatively impacts the translation between the two morphologically rich and complex languages, i.e. Hindi-to-Tamil. Since this segmentation process does not follow any linguistic rules and can abruptly segment a word at any character position, this may result in syntactic and morphological disagreements between the source-target sentence-pair and aligned words, respectively. We also observed that this may violate the underlying semantic agreement between the source-target sentence-pairs. As an example, we found that the BPE segmentation breaks the whose English equivalent is 'do not forget' which has no relation to \u0bb5 \u0ba3 \u0b95 [van \u0323nankal \u0323] (English equivalent: 'colors').", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The BPE segmentation on the Hindi-to-Tamil translation", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Unlike European languages, the Indian languages are usually fully phonetic with compulsory encoding of vowels. In our case, Hindi and Tamil differ a lot in terms of orthographic properties (e.g. different phonology, no schwa deletion in Tamil). The grammatical structures of Hindi and Tamil are different too, and they are morphologically divergent and from different language families. We saw that the BPE-based segmentation can completely change the underlying semantic agreements of the source and target sentences, which, in turn, may provide the learner with wrong (reasoning) knowledge about the sentence-pairs. This could be one of the reasons why the BPE-based NMT model is found to be underperforming in this translation task. This finding is corroborated by Banerjee and Bhattacharyya (2018) who in their work found that the Morfessorbased segmentation can yield better translation quality than the BPE-based segmentation for linguistically distant language-pairs, and other way round for the close language-pairs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 768, |
|
"end": 801, |
|
"text": "Banerjee and Bhattacharyya (2018)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The BPE segmentation on the Hindi-to-Tamil translation", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "In this paper, we investigated NMT and PB-SMT in resource-poor scenarios, choosing a specialised data domain (software localisation) for translation and two rarely-tested morphologically divergent language-pairs, Hindi-to-Tamil and English-to-Tamil. We studied translations on two setups, i.e. training data compiled from (i) freely available variety of data domains (e.g. political news, Wikipedia), and (ii) exclusively software localisation data domains. In addition to an automatic evaluation, we carried out a manual error analysis on the translations produced by our MT systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Use of in-domain data only at training has a positive impact on translation from a less inflected language to a highly inflected language, i.e. Englishto-Tamil. However, it does not impact the Hindi-to-Tamil translation. We conjecture that the morphological complexity of the source and target languages (Hindi and Tamil) involved in translation could be one of the reasons why the MT systems performed reasonably poorly even when they were exclusively trained on specialised domain data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We looked at the translations produced by our MT systems and found that in many cases, the BLEU scores underestimate the translation quality mainly due to relatively free word order in Tamil. In this context, Shterionov et al. (2018) computed the degree of underestimation in quality of three most-widely used automatic MT evaluation metrics: BLEU, METEOR (Banerjee and Lavie, 2005) and TER (Snover et al., 2006) , showing that for NMT, this may be up to 50%. We refer the interested readers to Way (2018 Way ( , 2019 who also drew attention to this phenomenon.", |
|
"cite_spans": [ |
|
{ |
|
"start": 209, |
|
"end": 233, |
|
"text": "Shterionov et al. (2018)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 382, |
|
"text": "(Banerjee and Lavie, 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 412, |
|
"text": "(Snover et al., 2006)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 495, |
|
"end": 504, |
|
"text": "Way (2018", |
|
"ref_id": "BIBREF60" |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 517, |
|
"text": "Way ( , 2019", |
|
"ref_id": "BIBREF61" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our error analysis on the translations by the English-to-Tamil and Hindi-to-Tamil MT systems reveals many positive and negative sides of the two paradigms: PB-SMT and NMT: (i) NMT makes many mistakes when translating domain terms, and fails poorly when translating OOV terms, (ii) NMT often makes incorrect lexical selections for polysemous words and omits words and domain terms in translation, and occasionally commit reordering errors, and (iii) translations produced by the NMT systems occasionally contain repetitions of other translated words, strange translations and one or more unexpected words that have no connection with the source sentence. We observed that whenever the NMT system encounters a source sentence containing OOVs, it tends to produce one or more unexpected words or repetitions of other translated words. As for SMT, unlike NMT, the MT systems usually do not make such mistakes, i.e. repetitions, strange, spurious or unexpected words in translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We observed that the BPE-based segmentation can completely change the underlying semantic agreements of the source and target sentences of the languages with greater morphological complexity. This could be one of the reasons why the Hindito-Tamil NMT system's translation quality is poor when the system is trained on the sub-word-level training data in comparison to one that was trained on the word-level training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We believe that the findings of this work provide significant contributions to this line of MT research. In future, we intend to consider more languages from different language families. We also plan to judge errors in translations using the multidimensional quality metrics error annotation framework (Lommel et al., 2014) which is a widely-used standard translation quality assessment toolkit in the translation industry and in MT research. The MT evaluation metrics such as chrF (Popovi\u0107, 2015) which operates at the character level and COMET (Rei et al., 2020) which achieved new state-of-the-art performance on the WMT 2019 Metrics Shared Task (Ma et al., 2019) obtained high levels of correlation with human judgements. We intend to consider these metrics (chrF and COMET) in our future investigation. As in Exel et al. (2020) who examined terminology translation in NMT in an industrial setup while using the terminology integration approaches presented in Dinu et al. (2019) , we intend to investigate terminology translation in NMT using the MT models of Dinu et al. (2019) on English-to-Tamil and Hindi-to-Tamil.", |
|
"cite_spans": [ |
|
{ |
|
"start": 302, |
|
"end": 323, |
|
"text": "(Lommel et al., 2014)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 497, |
|
"text": "(Popovi\u0107, 2015)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 546, |
|
"end": 564, |
|
"text": "(Rei et al., 2020)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 649, |
|
"end": 666, |
|
"text": "(Ma et al., 2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 814, |
|
"end": 832, |
|
"text": "Exel et al. (2020)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 964, |
|
"end": 982, |
|
"text": "Dinu et al. (2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1064, |
|
"end": 1082, |
|
"text": "Dinu et al. (2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://translate.google.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://opus.nlpl.eu/ 3 https://ai.facebook.com/blog/wikimatrix/ 4 http://data.statmt.org/pmindia 5 cld2: https://github.com/CLD2Owners/cld2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/anoopkunchukuttan/indic_ nlp_library", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The ADAPT Centre for Digital Content Technology is funded under the Science Foundation Ireland (SFI) Research Centres Programme (Grant No.13/RC/2106) and is co-funded under the European Regional Development Fund. This project has partially received funding from the European Union's Horizon 2020 research and innovation programme under the Marie Sk\u0142odowska-Curie grant agreement No. 713567, and the publication has emanated from research supported in part by a research grant from SFI under Grant Number 13/RC/2077.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Unsupervised statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3632--3642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018. Unsupervised statistical machine translation. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods in Natural Language Processing, pages 3632- 3642, Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "METEOR: An automatic metric for MT evaluation with improved correlation with human judgments", |
|
"authors": [ |
|
{ |
|
"first": "Satanjeev", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An automatic metric for MT evaluation with improved cor- relation with human judgments. In Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summariza- tion, pages 65-72, Ann Arbor, Michigan. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Meaningless yet meaningful: Morphology grounded subword-level NMT", |
|
"authors": [ |
|
{ |
|
"first": "Tamali", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Second Workshop on Subword/Character Level Models", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tamali Banerjee and Pushpak Bhattacharyya. 2018. Meaningless yet meaningful: Morphology grounded subword-level NMT. In Proceedings of the Second Workshop on Subword/Character Level Models, pages 55-60, New Orleans.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Neural versus phrase-based machine translation quality: a case study", |
|
"authors": [ |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arianna", |
|
"middle": [], |
|
"last": "Bisazza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mauro", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "257--267", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luisa Bentivogli, Arianna Bisazza, Mauro Cettolo, and Marcello Federico. 2016. Neural versus phrase-based machine translation quality: a case study. In Proceed- ings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 257-267, Austin, Texas.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Can out-of-the-box NMT beat a Domain-trained Moses on Technical Data?", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Anne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivien", |
|
"middle": [], |
|
"last": "Beyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aljoscha", |
|
"middle": [], |
|
"last": "Macketanz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Burchardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of EAMT User Studies and Project/Product Descriptions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anne M Beyer, Vivien Macketanz, Aljoscha Burchardt, and Philip Williams. 2017. Can out-of-the-box NMT beat a Domain-trained Moses on Technical Data? In Proceedings of EAMT User Studies and Project/Product Descriptions, pages 41-46, Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Domain, translationese and noise in synthetic data for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Bogoychev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.03362" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolay Bogoychev and Rico Sennrich. 2019. Domain, translationese and noise in synthetic data for neural ma- chine translation. arXiv preprint arXiv:1911.03362.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Using monolingual data in neural machine translation: a systematic study", |
|
"authors": [ |
|
{ |
|
"first": "Franck", |
|
"middle": [], |
|
"last": "Burlot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fran\u00e7ois", |
|
"middle": [], |
|
"last": "Yvon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "144--155", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6315" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franck Burlot and Fran\u00e7ois Yvon. 2018. Using monolin- gual data in neural machine translation: a systematic study. In Proceedings of the Third Conference on Ma- chine Translation: Research Papers, pages 144-155, Belgium, Brussels. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The TALP-UPC machine translation systems for WMT19 news translation task: pivoting techniques for low resource MT", |
|
"authors": [ |
|
{ |
|
"first": "Noe", |
|
"middle": [], |
|
"last": "Casas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Jos\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Fonollosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Escolano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Basta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Costa-Juss\u00e0", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "155--162", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Noe Casas, Jos\u00e9 AR Fonollosa, Carlos Escolano, Chris- tine Basta, and Marta R Costa-juss\u00e0. 2019. The TALP- UPC machine translation systems for WMT19 news translation task: pivoting techniques for low resource MT. In Proceedings of the Fourth Conference on Ma- chine Translation, pages 155-162, Florence, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A Comparative Quality Evaluation of PBSMT and NMT using Professional Translators", |
|
"authors": [ |
|
{ |
|
"first": "Sheila", |
|
"middle": [], |
|
"last": "Castilho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joss", |
|
"middle": [], |
|
"last": "Moorkens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Federico", |
|
"middle": [], |
|
"last": "Gaspari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vilelmini", |
|
"middle": [], |
|
"last": "Sosoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panayota", |
|
"middle": [], |
|
"last": "Georgakopoulou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pintu", |
|
"middle": [], |
|
"last": "Lohar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Valerio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miceli", |
|
"middle": [], |
|
"last": "Barone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Gialama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of MT Summit XVI, the 16th Machine Translation Summit", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "116--131", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sheila Castilho, Joss Moorkens, Federico Gaspari, Rico Sennrich, Vilelmini Sosoni, Panayota Geor- gakopoulou, Pintu Lohar, Andy Way, Antonio Valerio, Miceli Barone, and Maria Gialama. 2017. A Compar- ative Quality Evaluation of PBSMT and NMT using Professional Translators. In Proceedings of MT Sum- mit XVI, the 16th Machine Translation Summit, pages 116-131, Nagoya, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Facebook AI's WAT19 Myanmar-English translation task submission", |
|
"authors": [ |
|
{ |
|
"first": "Peng-Jen", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "El-Kishky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wenzek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 6th Workshop on Asian Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "112--122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng-Jen Chen, Jiajun Shen, Matthew Le, Vishrav Chaud- hary, Ahmed El-Kishky, Guillaume Wenzek, Myle Ott, and Marc'Aurelio Ranzato. 2019. Facebook AI's WAT19 Myanmar-English translation task submission. In Proceedings of the 6th Workshop on Asian Transla- tion, pages 112-122, Hong Kong, China.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Batch tuning strategies for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "427--436", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Cherry and George Foster. 2012. Batch tuning strategies for statistical machine translation. In Pro- ceedings of the 2012 Conference of the North Ameri- can Chapter of the Association for Computational Lin- guistics: Human Language Technologies, pages 427- 436, Montr\u00e9al, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Copied monolingual data improves low-resource neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Currey", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Antonio", |

"middle": [ |

"Valerio", |

"Miceli" |

], |

"last": "Barone", |

"suffix": "" |

}, |

{ |

"first": "Kenneth", |

"middle": [], |

"last": "Heafield", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Second Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--156", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-4715" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Currey, Antonio Valerio Miceli Barone, and Ken- neth Heafield. 2017. Copied monolingual data im- proves low-resource neural machine translation. In Proceedings of the Second Conference on Machine Translation, pages 148-156, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Training neural machine translation to apply terminology constraints", |
|
"authors": [ |
|
{ |
|
"first": "Georgiana", |
|
"middle": [], |
|
"last": "Dinu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prashant", |
|
"middle": [], |
|
"last": "Mathur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Al-Onaizan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3063--3068", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1294" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Georgiana Dinu, Prashant Mathur, Marcello Federico, and Yaser Al-Onaizan. 2019. Training neural machine translation to apply terminology constraints. In Pro- ceedings of the 57th Annual Meeting of the Associa- tion for Computational Linguistics, pages 3063-3068, Florence, Italy. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "SMT versus NMT: Preliminary comparisons for Irish", |
|
"authors": [ |
|
{ |
|
"first": "Meghan", |
|
"middle": [], |
|
"last": "Dowling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teresa", |
|
"middle": [], |
|
"last": "Lynn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Poncelas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AMTA 2018 Workshop on Technologies for MT of Low Resource Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meghan Dowling, Teresa Lynn, Alberto Poncelas, and Andy Way. 2018. SMT versus NMT: Preliminary com- parisons for Irish. In Proceedings of the AMTA 2018 Workshop on Technologies for MT of Low Resource Languages (LoResMT 2018), pages 12-20, Boston, MA.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A joint sequence translation model with integrated reordering", |
|
"authors": [ |
|
{ |
|
"first": "Nadir", |
|
"middle": [], |
|
"last": "Durrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helmut", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Fraser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1045--1054", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nadir Durrani, Helmut Schmid, and Alexander Fraser. 2011. A joint sequence translation model with inte- grated reordering. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguis- tics: Human Language Technologies, pages 1045- 1054, Portland, Oregon, USA.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Terminology-constrained neural machine translation at SAP", |
|
"authors": [ |
|
{ |
|
"first": "Miriam", |
|
"middle": [], |
|
"last": "Exel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bianka", |
|
"middle": [], |
|
"last": "Buschbeck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lauritz", |
|
"middle": [], |
|
"last": "Brandt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simona", |
|
"middle": [], |
|
"last": "Doneva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "271--280", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miriam Exel, Bianka Buschbeck, Lauritz Brandt, and Si- mona Doneva. 2020. Terminology-constrained neural machine translation at SAP. In Proceedings of the 22nd Annual Conference of the European Association for Machine Translation, pages 271-280, Lisboa, Por- tugal. European Association for Machine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Neural vs. phrase-based machine translation in a multi-domain scenario", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Amin Farajian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Turchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matteo", |
|
"middle": [], |
|
"last": "Negri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "280--284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Amin Farajian, Marco Turchi, Matteo Negri, Nicola Bertoldi, and Marcello Federico. 2017. Neural vs. phrase-based machine translation in a multi-domain scenario. In Proceedings of the 15th Conference of the European Chapter of the Association for Compu- tational Linguistics: Volume 2, Short Papers, pages 280-284, Valencia, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Multiway, multilingual neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baskaran", |
|
"middle": [], |
|
"last": "Sankaran", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Fatos", |

"middle": [ |

"T", |

"Yarman" |

], |

"last": "Vural", |

"suffix": "" |

}, |

{ |

"first": "Yoshua", |

"middle": [], |

"last": "Bengio", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "Computer Speech & Language", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "236--252", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Orhan Firat, Kyunghyun Cho, Baskaran Sankaran, Fatos T Yarman Vural, and Yoshua Bengio. 2017. Multi- way, multilingual neural machine translation. Com- puter Speech & Language, 45:236-252.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "PMIndia-a collection of parallel corpora of languages of India", |
|
"authors": [ |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Faheem", |
|
"middle": [], |
|
"last": "Kirefu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barry Haddow and Faheem Kirefu. 2020. PMIndia-a col- lection of parallel corpora of languages of India. arXiv preprint 2001.09907.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Investigating terminology translation in statistical and neural machine translation: A case study on English-to-Hindi and Hindi-to-English", |
|
"authors": [ |
|
{ |
|
"first": "Rejwanul", |
|
"middle": [], |
|
"last": "Haque", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammed", |
|
"middle": [], |
|
"last": "Hasanuzzaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the International Conference on Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "437--446", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rejwanul Haque, Mohammed Hasanuzzaman, and Andy Way. 2019. Investigating terminology translation in statistical and neural machine translation: A case study on English-to-Hindi and Hindi-to-English. In Proceed- ings of the International Conference on Recent Ad- vances in Natural Language Processing, pages 437- 446, Varna, Bulgaria.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Forest rescoring: Faster decoding with integrated language models", |
|
"authors": [ |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "144--151", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liang Huang and David Chiang. 2007. Forest rescoring: Faster decoding with integrated language models. In Proceedings of the 45th Annual Meeting of the Asso- ciation of Computational Linguistics, pages 144-151, Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "A challenge set approach to evaluating machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Isabelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Foster", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pierre Isabelle, Colin Cherry, and George F. Foster. 2017. A challenge set approach to evaluating machine trans- lation. CoRR, abs/1704.07431.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Google's multilingual neural machine translation system: Enabling zero-shot translation", |
|
"authors": [ |
|
{ |
|
"first": "Melvin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [], |
|
"last": "Krikun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Thorat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernanda", |
|
"middle": [], |
|
"last": "Vi\u00e9gas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Wattenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Macduff", |
|
"middle": [], |
|
"last": "Hughes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "339--351", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melvin Johnson, Mike Schuster, Quoc V. Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Vi\u00e9gas, Martin Wattenberg, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: En- abling zero-shot translation. Transactions of the As- sociation for Computational Linguistics, 5:339-351.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Is neural machine translation ready for deployment? a case study on 30 translation directions", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Dwojak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1610.01108" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt, Tomasz Dwojak, and Hieu Hoang. 2016. Is neural machine translation ready for deployment? a case study on 30 translation directions. arXiv preprint arXiv:1610.01108.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Comparison of smt and nmt trained with large patent corpora: Japio at wat2017", |
|
"authors": [ |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Kinoshita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tadaaki", |
|
"middle": [], |
|
"last": "Oshio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoharu", |
|
"middle": [], |
|
"last": "Mitsuhashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 4th Workshop on Asian Translation (WAT2017)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "140--145", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Satoshi Kinoshita, Tadaaki Oshio, and Tomoharu Mit- suhashi. 2017. Comparison of smt and nmt trained with large patent corpora: Japio at wat2017. In Pro- ceedings of the 4th Workshop on Asian Translation (WAT2017), pages 140-145. Asian Federation of Nat- ural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "OpenNMT: Opensource toolkit for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuntian", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Senellart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of ACL 2017, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "67--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Klein, Yoon Kim, Yuntian Deng, Jean Senel- lart, and Alexander Rush. 2017. OpenNMT: Open- source toolkit for neural machine translation. In Pro- ceedings of ACL 2017, System Demonstrations, pages 67-72, Vancouver, Canada. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Fine-grained human evaluation of neural versus phrase-based machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Klubi\u010dka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Toral", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "V\u00edctor", |

"middle": [ |

"M" |

], |

"last": "S\u00e1nchez-Cartagena", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Filip Klubi\u010dka, Antonio Toral, and V\u00edctor M. S\u00e1nchez- Cartagena. 2017. Fine-grained human evaluation of neural versus phrase-based machine translation. CoRR, abs/1706.04389.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Quantitative fine-grained human evaluation of machine translation systems", |
|
"authors": [ |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Klubi\u010dka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Toral", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "V\u00edctor", |

"middle": [ |

"M" |

], |

"last": "S\u00e1nchez-Cartagena", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Filip Klubi\u010dka, Antonio Toral, and V\u00edctor M. S\u00e1nchez- Cartagena. 2018. Quantitative fine-grained human evaluation of machine translation systems: a case study on English to Croatian. CoRR, abs/1802.01451.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Improved backing-off for mgram language modeling", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Kneser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "International Conference on Acoustics, Speech, and Signal Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "181--184", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICASSP.1995.479394" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Kneser and H. Ney. 1995. Improved backing-off for m- gram language modeling. In 1995 International Con- ference on Acoustics, Speech, and Signal Processing, volume 1, pages 181-184 vol.1.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Statistical significance tests for machine translation evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "388--395", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2004. Statistical significance tests for machine translation evaluation. In Proceedings of the 2004 Conference on Empirical Methods in Natu- ral Language Processing (EMNLP), pages 388-395, Barcelona, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |

{ |

"first": "Alexandra", |

"middle": [], |

"last": "Constantin", |

"suffix": "" |

}, |

{ |

"first": "Evan", |

"middle": [], |

"last": "Herbst", |

"suffix": "" |

} |
|
], |
|
"year": 2007, |
|
"venue": "ACL 2007, Proceedings of the Interactive Poster and Demonstration Sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ond\u0159ej Bojar, Alexandra Constantin, Williams College, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In ACL 2007, Proceedings of the Interactive Poster and Demonstration Sessions, pages 177-180, Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Six challenges for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Knowles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Neural Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "28--39", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn and Rebecca Knowles. 2017. Six chal- lenges for neural machine translation. In Proceedings of the First Workshop on Neural Machine Translation, pages 28-39, Vancouver.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Phrasebased & neural unsupervised machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Denoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2018. Phrase- based & neural unsupervised machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Multilingual denoising pretraining for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.08210" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre- training for neural machine translation. arXiv preprint arXiv:2001.08210.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Multidimensional Quality Metrics (MQM): A framework for declaring and describing translation quality metrics. Tradum\u00e1tica: tecnologies de la traducci\u00f3", |
|
"authors": [ |
|
{ |
|
"first": "Arle", |
|
"middle": [], |
|
"last": "Richard Lommel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hans", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aljoscha", |
|
"middle": [], |
|
"last": "Burchardt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "455--463", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arle Richard Lommel, Hans Uszkoreit, and Aljoscha Burchardt. 2014. Multidimensional Quality Metrics (MQM): A framework for declaring and describing translation quality metrics. Tradum\u00e1tica: tecnologies de la traducci\u00f3, (12):455-463.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Translation of patent sentences with a large vocabulary of technical terms using neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Zi", |
|
"middle": [], |
|
"last": "Long", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takehito", |
|
"middle": [], |
|
"last": "Utsuro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoharu", |
|
"middle": [], |
|
"last": "Mitsuhashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikio", |
|
"middle": [], |
|
"last": "Yamamoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 3rd Workshop on Asian Translation (WAT2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "47--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zi Long, Takehito Utsuro, Tomoharu Mitsuhashi, and Mikio Yamamoto. 2016. Translation of patent sen- tences with a large vocabulary of technical terms us- ing neural machine translation. In Proceedings of the 3rd Workshop on Asian Translation (WAT2016), pages 47-57, Osaka, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Results of the wmt19 metrics shared task: Segment-level and strong mt systems pose big challenges", |
|
"authors": [ |
|
{ |
|
"first": "Qingsong", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johnny", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yvette", |
|
"middle": [], |
|
"last": "Graham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "62--90", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qingsong Ma, Johnny Wei, Ond\u0159ej Bojar, and Yvette Gra- ham. 2019. Results of the wmt19 metrics shared task: Segment-level and strong mt systems pose big chal- lenges. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 62-90, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Unsupervised neural machine translation initialized by unsupervised statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Marie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.12703" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Marie and Atsushi Fujita. 2018. Unsuper- vised neural machine translation initialized by unsu- pervised statistical machine translation. arXiv preprint arXiv:1810.12703.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "A continuous improvement framework of machine translation for Shipibokonibo", |
|
"authors": [ |
|
{ |
|
"first": "H\u00e9ctor Erasmo G\u00f3mez", |
|
"middle": [], |
|
"last": "Montoya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kervy Dante Rivas", |
|
"middle": [], |
|
"last": "Rojas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arturo", |
|
"middle": [], |
|
"last": "Oncevay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2nd Workshop on Technologies for MT of Low Resource Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H\u00e9ctor Erasmo G\u00f3mez Montoya, Kervy Dante Rivas Ro- jas, and Arturo Oncevay. 2019. A continuous improve- ment framework of machine translation for Shipibo- konibo. In Proceedings of the 2nd Workshop on Tech- nologies for MT of Low Resource Languages, pages 17-23, Dublin, Ireland. European Association for Ma- chine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Exploiting linguistic resources for neural machine translation using multitask learning", |
|
"authors": [ |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Niehues", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunah", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Second Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "80--89", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan Niehues and Eunah Cho. 2017. Exploiting linguistic resources for neural machine translation using multi- task learning. In Proceedings of the Second Confer- ence on Machine Translation, pages 80-89, Copen- hagen, Denmark.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Comparison between nmt and pbsmt performance for translating noisy usergenerated content", |
|
"authors": [ |
|
{ |
|
"first": "Jos\u00e9 Carlos Rosales", |
|
"middle": [], |
|
"last": "Nunez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djam\u00e9", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wisniewski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NEAL Proceedings of the 22nd Nordic Conference on Computional Linguistics (NoDaLiDa)", |
|
"volume": "167", |
|
"issue": "", |
|
"pages": "2--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jos\u00e9 Carlos Rosales Nunez, Djam\u00e9 Seddah, and Guil- laume Wisniewski. 2019. Comparison between nmt and pbsmt performance for translating noisy user- generated content. In NEAL Proceedings of the 22nd Nordic Conference on Computional Linguistics (NoDaLiDa), September 30-October 2, Turku, Fin- land, 167, pages 2-14. Link\u00f6ping University Elec- tronic Press.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Neural machine translation for low-resource languages", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "\u00d6stling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert \u00d6stling and J\u00f6rg Tiedemann. 2017. Neural ma- chine translation for low-resource languages. CoRR, abs/1708.05729.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "BLEU: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACL-2002: 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: a method for automatic eval- uation of machine translation. In ACL-2002: 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, PA. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "chrf: character n-gram f-score for automatic mt evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovi\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Tenth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "392--395", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovi\u0107. 2015. chrf: character n-gram f-score for automatic mt evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Comparing language related issues for nmt and pbmt between German and English", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovi\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "The Prague Bulletin of Mathematical Linguistics", |
|
"volume": "108", |
|
"issue": "1", |
|
"pages": "209--220", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovi\u0107. 2017. Comparing language related is- sues for nmt and pbmt between German and En- glish. The Prague Bulletin of Mathematical Linguis- tics, 108(1):209-220.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Investigating low-resource machine translation for English-to-Tamil", |
|
"authors": [ |
|
{ |
|
"first": "Akshai", |
|
"middle": [], |
|
"last": "Ramesh", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Venkatesh", |

"middle": [ |

"Balavadhani" |

], |

"last": "Parthasarathy", |

"suffix": "" |

}, |

{ |

"first": "Rejwanul", |

"middle": [], |

"last": "Haque", |

"suffix": "" |

}, |

{ |

"first": "Andy", |

"middle": [], |

"last": "Way", |

"suffix": "" |

} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of Proceedings of the AACL-IJCNLP 2020 Workshop on Technologies for MT of Low Resource Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akshai Ramesh, Venkatesh Balavadhani Parthasarathy, Rejwanul Haque, and Andy Way. 2020. Investigat- ing low-resource machine translation for English-to- Tamil. In Proceedings of Proceedings of the AACL- IJCNLP 2020 Workshop on Technologies for MT of Low Resource Languages (LoResMT 2020), Suzhou, China.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Comet: A neural framework for mt evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Rei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Craig", |
|
"middle": [], |
|
"last": "Stewart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ana", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Farinha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2009.09025" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020. Comet: A neural framework for mt eval- uation. arXiv preprint arXiv:2009.09025.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Wikimatrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuo", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyu", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong, and Francisco Guzm\u00e1n. 2019. Wikimatrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia. arXiv preprint 1907.05791.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Improving neural machine translation models with monolingual data", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "86--96", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016a. Improving neural machine translation models with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 86-96, Berlin, Germany. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016b. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 1715-1725, Berlin, Germany.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Revisiting lowresource neural machine translation: A case study", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Biao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "211--221", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1021" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich and Biao Zhang. 2019. Revisiting low- resource neural machine translation: A case study. In Proceedings of the 57th Annual Meeting of the As- sociation for Computational Linguistics, pages 211- 221, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Empirical evaluation of nmt and pbsmt quality for large-scale translation production", |
|
"authors": [ |
|
{ |
|
"first": "Dimitar", |
|
"middle": [], |
|
"last": "Shterionov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pat", |
|
"middle": [], |
|
"last": "Nagle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Casanellas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Riccardo", |
|
"middle": [], |
|
"last": "Superbo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tony O'", |
|
"middle": [], |
|
"last": "Dowd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "User track of the 20th Annual Conference of the European Association for Machine Translation (EAMT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "74--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dimitar Shterionov, Pat Nagle, Laura Casanellas, Ric- cardo Superbo, and Tony O'Dowd. 2017. Empirical evaluation of nmt and pbsmt quality for large-scale translation production. In User track of the 20th Annual Conference of the European Association for Machine Translation (EAMT), pages 74-79, Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Human versus automatic quality evaluation of nmt and pbsmt", |
|
"authors": [ |
|
{ |
|
"first": "Dimitar", |
|
"middle": [], |
|
"last": "Shterionov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Riccardo", |
|
"middle": [], |
|
"last": "Superbo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pat", |
|
"middle": [], |
|
"last": "Nagle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Casanellas", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Tony", |

"middle": [], |

"last": "O'Dowd", |

"suffix": "" |

}, |

{ |

"first": "Andy", |

"middle": [], |

"last": "Way", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "Machine Translation", |
|
"volume": "32", |
|
"issue": "3", |
|
"pages": "217--235", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dimitar Shterionov, Riccardo Superbo, Pat Nagle, Laura Casanellas, Tony O'dowd, and Andy Way. 2018. Hu- man versus automatic quality evaluation of nmt and pbsmt. Machine Translation, 32(3):217-235.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "A study of translation edit rate with targeted human annotation", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Snover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linnea", |
|
"middle": [], |
|
"last": "Micciulla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of association for machine translation in the Americas", |
|
"volume": "200", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Linnea Micciulla, and John Makhoul. 2006. A study of trans- lation edit rate with targeted human annotation. In Pro- ceedings of association for machine translation in the Americas, volume 200. Cambridge, MA.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "On the limitations of unsupervised bilingual dictionary induction", |
|
"authors": [ |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "778--788", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1072" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anders S\u00f8gaard, Sebastian Ruder, and Ivan Vuli\u0107. 2018. On the limitations of unsupervised bilingual dictionary induction. In Proceedings of the 56th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 778-788, Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Translation quality and productivity: A study on rich morphology languages", |
|
"authors": [ |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kim", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Blain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aljoscha", |
|
"middle": [], |
|
"last": "Burchardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivien", |
|
"middle": [], |
|
"last": "Macketanz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Inguna", |
|
"middle": [], |
|
"last": "Skadi\u0146a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matteo", |
|
"middle": [], |
|
"last": "Negri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Turchi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of MT Summit XVI, the 16th Machine Translation Summit", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--71", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucia Specia, Kim Harris, Fr\u00e9d\u00e9ric Blain, Aljoscha Bur- chardt, Vivien Macketanz, Inguna Skadi\u0146a, Matteo Ne- gri, and Marco Turchi. 2017. Translation quality and productivity: A study on rich morphology languages. In Proceedings of MT Summit XVI, the 16th Machine Translation Summit, pages 55-71, Nagoya, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Parallel data, tools and interfaces in OPUS", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC'2012)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2214--2218", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Tiedemann. 2012. Parallel data, tools and interfaces in OPUS. In Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC'2012), pages 2214-2218, Istanbul, Turkey.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "A multifaceted evaluation of neural versus phrasebased machine translation for 9 language directions", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Toral", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "V\u00edctor", |

"middle": [ |

"M" |

], |

"last": "S\u00e1nchez-Cartagena", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Toral and V\u00edctor M. S\u00e1nchez-Cartagena. 2017. A multifaceted evaluation of neural versus phrase- based machine translation for 9 language directions. CoRR, abs/1701.02901.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "What level of quality can neural machine translation attain on literary text?", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Toral", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Translation Quality Assessment", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "263--287", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Toral and Andy Way. 2018. What level of qual- ity can neural machine translation attain on literary text? In Translation Quality Assessment, pages 263- 287. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Quality expectations of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Translation quality assessment", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "159--178", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andy Way. 2018. Quality expectations of machine trans- lation. In S. Castilho, J. Moorkens, F. Gaspari, and S. Doherty, editors, Translation quality assessment, pages 159-178. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "The Bloomsbury Companion to Language Industry Studies", |
|
"authors": [ |
|
{ |
|
"first": "Andy", |
|
"middle": [ |
|
"Way" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andy Way. 2019. Machine translation: where are we at today? In Erik Angelone, Maureen Ehrensberger- Dow, and Gary Massey, editors, The Bloomsbury Com- panion to Language Industry Studies. Bloomsbury Academic Publishing.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"content": "<table><tr><td/><td/><td colspan=\"2\">Hindi-to-Tamil</td><td/></tr><tr><td/><td/><td>sents.</td><td colspan=\"2\">words [Hi] words [Ta]</td></tr><tr><td/><td colspan=\"4\">MIXED 1,00,047 1,705,034 1,196,008</td></tr><tr><td/><td>vocab</td><td/><td>104,564</td><td>284,921</td></tr><tr><td>train</td><td>avg. sent</td><td/><td>17</td><td>14</td></tr><tr><td>sets</td><td>IT</td><td>48,461</td><td>3,54,426</td><td>2,76,514</td></tr><tr><td/><td>vocab</td><td/><td>31,258</td><td>67,069</td></tr><tr><td/><td>avg. sent</td><td/><td>8</td><td>7</td></tr><tr><td>devset</td><td/><td>1,500</td><td>10,903</td><td>7,879</td></tr><tr><td>testset</td><td/><td>1,500</td><td>9,362</td><td>6,748</td></tr><tr><td/><td/><td colspan=\"2\">English-to-Tamil</td><td/></tr><tr><td/><td/><td>sents.</td><td colspan=\"2\">words [En] words [Ta]</td></tr><tr><td/><td colspan=\"4\">MIXED 222,367 5,355,103 4,066,449</td></tr><tr><td/><td>vocab</td><td/><td>424,701</td><td>423,599</td></tr><tr><td>train</td><td>avg. sent</td><td/><td>25</td><td>19</td></tr><tr><td>sets</td><td>IT</td><td>68,352</td><td>448,966</td><td>407,832</td></tr><tr><td/><td>vocab</td><td/><td>31,216</td><td>77,323</td></tr><tr><td/><td>avg. sent</td><td/><td>7</td><td>6</td></tr><tr><td>devset</td><td/><td>1,500</td><td>17,903</td><td>13,879</td></tr><tr><td>testset</td><td/><td>1,500</td><td>16,020</td><td>12,925</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Data Statistics" |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td/><td colspan=\"2\">English-Tamil Hindi-Tamil</td></tr><tr><td>PB-SMT</td><td>9.56</td><td>5.48</td></tr><tr><td>NMT</td><td>4.35</td><td>2.10</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "The Mixed Setup." |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td/><td colspan=\"2\">English-to-Tamil Hindi-to-Tamil</td></tr><tr><td>PB-SMT</td><td>15.47</td><td>4.12</td></tr><tr><td>NMT</td><td>9.14</td><td>2.12</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "The IT Setup." |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td>; Section</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "" |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Translations that are good in quality were unfairly penalised by the BLEU metric." |
|
}, |
|
"TABREF7": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Term omission." |
|
}, |
|
"TABREF9": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Incorrect lexical selection in translation." |
|
}, |
|
"TABREF11": { |
|
"content": "<table><tr><td colspan=\"4\">we show an English source sentence and its Tamil</td></tr><tr><td colspan=\"4\">translation by the NMT system. The English equiv-</td></tr><tr><td colspan=\"4\">alent of the Tamil translation is 'This billion people</td></tr><tr><td colspan=\"4\">1.25'. As we can see, this error makes the transla-</td></tr><tr><td colspan=\"4\">tion less fluent. The SMT system overtranslates the</td></tr><tr><td colspan=\"4\">English source sentence, i.e. \"It has a population of</td></tr><tr><td colspan=\"2\">1.25 billion in one country\".</td><td/><td/></tr><tr><td>Eng. NMT SMT</td><td>Statistics of games played \u0bb5\u0bb0 [pul \u0323l \u0323ivivaram] \u0bb5\u0bb0 \u0bc8\u0bb3\u0baf\u0bbe varam vil \u0323aiy\u0101t \u0323t \u0323ukal \u0323i]</td><td>\u0b95</td><td>[pul \u0323l \u0323ivi-</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Reordering error in translation." |
|
}, |
|
"TABREF12": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Word drop in translation." |
|
}, |
|
"TABREF14": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "" |
|
}, |
|
"TABREF16": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Spurious Words in the translation." |
|
} |
|
} |
|
} |
|
} |