|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:59:37.017606Z" |
|
}, |
|
"title": "Zero-shot translation among Indian languages", |
|
"authors": [ |
|
{ |
|
"first": "Rudali", |
|
"middle": [], |
|
"last": "Huidrom", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IPS Waseda University Kitakyushu", |
|
"location": { |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Lepage", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Standard neural machine translation (NMT) allows a model to perform translation between a pair of languages. Multilingual neural machine translation (NMT), on the other hand, allows a model to perform translation between several language pairs, even between language pairs for which no sentences pair has been seen during training (zero-shot translation). This paper presents experiments with zero-shot translation on low resource Indian languages with a very small amount of data for each language pair. We first report results on balanced data over all considered language pairs. We then expand our experiments for additional three rounds by increasing the training data with 2,000 sentence pairs in each round for some of the language pairs. We obtain an increase in translation accuracy with its balanced data settings score multiplied by 7 for Manipuri to Hindi during Round-III of zeroshot translation.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Standard neural machine translation (NMT) allows a model to perform translation between a pair of languages. Multilingual neural machine translation (NMT), on the other hand, allows a model to perform translation between several language pairs, even between language pairs for which no sentences pair has been seen during training (zero-shot translation). This paper presents experiments with zero-shot translation on low resource Indian languages with a very small amount of data for each language pair. We first report results on balanced data over all considered language pairs. We then expand our experiments for additional three rounds by increasing the training data with 2,000 sentence pairs in each round for some of the language pairs. We obtain an increase in translation accuracy with its balanced data settings score multiplied by 7 for Manipuri to Hindi during Round-III of zeroshot translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "End-to-end neural Machine Translation (NMT) Bahdanau et al., 2015; Cho et al., 2014) can be applied to low resource languages with the risk that small amounts of training data result in low translation accuracy (Koehn and Knowles, 2017) . Improvement in translation of low resource languages has been reported with the use of multilingual models (Ha et al., 2016; Johnson et al., 2017) , back-translation (Sennrich et al., 2016a) and unsupervised learning (Lample et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 66, |
|
"text": "Bahdanau et al., 2015;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 67, |
|
"end": 84, |
|
"text": "Cho et al., 2014)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 236, |
|
"text": "(Koehn and Knowles, 2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 363, |
|
"text": "(Ha et al., 2016;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 364, |
|
"end": 385, |
|
"text": "Johnson et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 429, |
|
"text": "(Sennrich et al., 2016a)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 456, |
|
"end": 477, |
|
"text": "(Lample et al., 2018)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Initially, MT systems were designed for one single language pair (Johnson et al., 2017) . However, NMT systems can be trained simultaneously on many language pairs. This enables translation from and into any of the languages used during training. Dong et al. (2015) first modified an attention-based encoder-decoder model so as to perform multilingual translation from one language to many languages while Luong et al. (2015) used multitask learning for multilingual training. Firat et al. (2016) introduced the notion of multilingual NMT, by sharing the attention mechanism across several languages. Gu et al. (2018) introduced universal machine translation, where a universal representation space is used for all languages. Johnson et al. (2017) introduced zero-shot translation: training on multiple source and target languages enables to translate arbitrarily between any of the languages used during training, even between languages for which no sentence pair was ever seen during training. The authors characterised zeroshot translation as \"a working example of transfer learning within neural translation models\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 87, |
|
"text": "(Johnson et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 265, |
|
"text": "Dong et al. (2015)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 425, |
|
"text": "Luong et al. (2015)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 496, |
|
"text": "Firat et al. (2016)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 617, |
|
"text": "Gu et al. (2018)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 726, |
|
"end": 747, |
|
"text": "Johnson et al. (2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our work consists in testing the use of zero-shot translation for a very low resource language, an Indian language called Manipuri, locally known as Meiteilon, in the context of training with other Indian languages between which no parallel data may exist. Again, Manipuri is a low resource language. It is spoken by about two million people predominantly in the state of Manipur, India. It is an endangered language (Moseley and Nicolas, 2010) from the Sino-Tibetan language family and it shows highly agglutinating word structure. With its language status as endangered, it is one of the two endangered languages of the 8th Schedule of the Indian Constitution. Machine translation for this language is at its infant stage due to the very limited amount of resources available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 417, |
|
"end": 444, |
|
"text": "(Moseley and Nicolas, 2010)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We make use of the pmindia dataset 1 (Haddow and Kirefu, 2020) . This data set provides monolingual and parallel corpora with English for thirteen Indian languages. We take the following language pairs into consideration: Assamese-English, Bengali-English, Hindi-English and Manipuri-English 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 62, |
|
"text": "(Haddow and Kirefu, 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our main objective is to measure how much accuracy can be achieved in translation from Manipuri into the three other Indian languages (Assamese, Bengali and Hindi), without using any data from these language pairs, thanks to zeroshot translation. Additionally, we use the JW300 dataset 3 (Agi\u0107 and Vuli\u0107, 2019; Tiedemann, 2012) for Assamese-English language pairs for two rounds of the experiment due to the limited number of data present in the pmindia data set for this language pair. In our experiments, we use only the above-mentioned resources.", |
|
"cite_spans": [ |
|
{ |
|
"start": 288, |
|
"end": 310, |
|
"text": "(Agi\u0107 and Vuli\u0107, 2019;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 327, |
|
"text": "Tiedemann, 2012)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our goal is to improve the translation quality of our zero-shot translation system among the low resourced languages. We propose to control the translation quality by introducing the notion of balanced data in the respective language pairs as a parameter.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The reason why we concentrate on Manipuri is because it is an extremely low resource language: only 7,000 sentence pairs in Manipuri-English are available in the pmindia data set. Developing MT systems with such a small amount of data is a true challenge. Our experiments consist in increasing the training data by groups of 2,000 sentence pairs (Indian language-English), in three rounds. We measure the translation accuracy between Manipuri and other Indian languages in zero-shot translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The structure of the paper is as follows. Section 2 describes previous work. Section 3 gives details about the data set used. Section 4 presents the methodology. Section 5 describes the experiments, their results and provides an analysis. Section 6 concludes and proposes future directions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "NMT (Kalchbrenner and Blunsom, 2013; Bahdanau et al., 2015; Cho et al., 2014) for a single language pair has been explored extensively over the years. It has been extended to multilingual models (Dong et al., 2015; Luong et al., 2015; Ha et al., 2016; Firat et al., 2016; Johnson et al., 2017) on available multilingual data. One of the approach is that of zero-shot translation (Johnson et al., 2017; Arivazhagan et al., 2019) between language pairs for which no parallel data has been seen during training. Another interesting work addressed by (Johnson et al., 2017; Ha et al., 2016) is the introduction of artificial tokens. It helps in minimizing the architectural changes in the decoder.", |
|
"cite_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 36, |
|
"text": "(Kalchbrenner and Blunsom, 2013;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 37, |
|
"end": 59, |
|
"text": "Bahdanau et al., 2015;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 60, |
|
"end": 77, |
|
"text": "Cho et al., 2014)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 195, |
|
"end": 214, |
|
"text": "(Dong et al., 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 215, |
|
"end": 234, |
|
"text": "Luong et al., 2015;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 251, |
|
"text": "Ha et al., 2016;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 271, |
|
"text": "Firat et al., 2016;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 293, |
|
"text": "Johnson et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 401, |
|
"text": "(Johnson et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 427, |
|
"text": "Arivazhagan et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 547, |
|
"end": 569, |
|
"text": "(Johnson et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 570, |
|
"end": 586, |
|
"text": "Ha et al., 2016)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Zero-shot machine translation has been explored for low resource languages. Zoph and Knight (2016) proposed an approach for multi-source translation. Their model consists in multiple encoders with a different attention mechanism for each source language. However, this model requires a multi-way parallel corpus for every language pairs, which is hard to obtain, especially for languages with low resource.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 98, |
|
"text": "Zoph and Knight (2016)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "NMT is capable of cross-lingual learning (Kim et al., 2019; Zoph and Knight, 2016) . This is the motivation for zero-shot translation. Firat et al. (2017) introduced the notion of zero-resource translation. They used a pre-trained multi-way multilingual model and performed fine-tuning with the pseudo parallel data generated by the model. Madaan and Sadat (2020) introduced an approach for improving multilingual NMT for Indian languages. They showed that their model is able to improve the translation for low resource language pairs by leveraging high resource language pairs, thanks to transfer learning.", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 59, |
|
"text": "(Kim et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 60, |
|
"end": 82, |
|
"text": "Zoph and Knight, 2016)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 135, |
|
"end": 154, |
|
"text": "Firat et al. (2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 363, |
|
"text": "Madaan and Sadat (2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our work is closely related to (Johnson et al., 2017) : we analyse the performance of multilingual models on our data and perform zero-shot translation as well. The originality in our work is that we aim to improve the translation quality of our model and since we deal with low resource languages, we propose to control the translation quality such that we train, validate and test our model on balanced data sets across all the language pairs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 53, |
|
"text": "(Johnson et al., 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We use the pmindia dataset (Haddow and Kirefu, 2020) . 4 This data set contains the official documents from the Prime Minister Office of the Government of India. It contains monolingual and parallel corpora. There are 13 Indian languages and English in it.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 52, |
|
"text": "(Haddow and Kirefu, 2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 55, |
|
"end": 56, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use data for four language pairs from the parallel corpus found in the data set: from Assamese, Bengali, Hindi and Manipuri into English. The Indian languages used belong to different language families, yet they have high lexical similarities because of regional influences. Assamese (ISO 639-2 asm), Bengali (ben) and Hindi (hin) belong to the Indo-Aryan language family, Manipuri (mni) belongs to the Sino-Tibetan language family and English (eng) belongs to the Indo-European language family. Assamese, Bengali and Manipuri share the same writing system, the Eastern Nagari script. Bengali has high language influence on Assamese, and some influence on Manipuri as well. Hindi, on the other hand, influences all the other languages, with a large number of words borrowed from it. Statistics about the data used are presented in Table 1 . The largest number of sentence pairs is for Hindi-English (almost 60,000). Only half is available for Bengali-English and less than 10,000 for Assamese-English and Manipuri-English, the latter one having only 7,419 sentence pairs. The number of words per sentence in all languages ranges from 15 to 20. The number of word types in each language reflects the number of sentences and the structure of the language: it is natural that the more the sentence pairs, the higher the number of word types; it should however be observed that, although the number of sentence pairs in Bengali-English is half of that in Hindi-English, the number of word types in Bengali is higher than in Hindi.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 834, |
|
"end": 841, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Additionally, we use the JW300 dataset (Agi\u0107 and Vuli\u0107, 2019; Tiedemann, 2012) for Assamese-English language pairs for two rounds due to the limited number of sentence pairs present in the pmindia data set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 61, |
|
"text": "(Agi\u0107 and Vuli\u0107, 2019;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 62, |
|
"end": 78, |
|
"text": "Tiedemann, 2012)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We propose to first measure the effect of using zero-shot translation on balanced data sets for all language pairs. We then expand our experiments and increase the training data with the aim of inspecting how efficient or not transfer learning can be. The increase will be performed by groups of 2,000 sentence pairs in training for language pairs excluding Manipuri. There will be three rounds of increase of data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To deal with numerous source and target languages during multilingual or zero-shot translation training, we classically introduce an artificial token at the beginning of all the source language sentences. The artificial token contains the information about the source language and the target language for the sentence pair at hand.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
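
{

"text": "As an illustration of this tagging mechanism, here is a minimal sketch (our own illustrative code, not the authors'; the exact token format is an assumption modelled on Johnson et al. (2017), and it encodes only the target language):\n\ndef add_target_token(src_sentence, tgt_lang):\n    # Prepend an artificial token, e.g. '<2hin>' for translation into Hindi,\n    # so that a single multilingual model can route any source sentence\n    # towards the requested target language.\n    return '<2{}> {}'.format(tgt_lang, src_sentence)\n\n# add_target_token('a Manipuri sentence', 'ben') -> '<2ben> a Manipuri sentence'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methodology",

"sec_num": "4"

},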
|
{ |
|
"text": "Our first series of experiments consists in measuring translation quality in a balanced data set setting. Our model is trained on an equal amount of training data for all the languages. We start with 5,000 sentences for training across all language pairs. The amount of test and validation data is 1,000 sentences each for all the languages in our model. Each language uses a balanced data set for training, validation and test in the Balanced round. The data is randomly selected. We do not perform pivoting through the English language because of the limited number of sentences in common.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
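
{

"text": "A minimal sketch of such a balanced random split (our own illustrative code; the function and variable names are ours):\n\nimport random\n\ndef balanced_split(sentence_pairs, n_train=5000, n_valid=1000, n_test=1000, seed=0):\n    # Randomly select train/validation/test subsets of identical sizes\n    # for every language pair, as in the Balanced setting.\n    rng = random.Random(seed)\n    pairs = list(sentence_pairs)\n    rng.shuffle(pairs)\n    train = pairs[:n_train]\n    valid = pairs[n_train:n_train + n_valid]\n    test = pairs[n_train + n_valid:n_train + n_valid + n_test]\n    return train, valid, test",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methodology",

"sec_num": "4"

},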
|
{ |
|
"text": "Our second series of experiments measures translation quality when increasing the data for other languages than Manipuri. We showed in Table 1 that the Manipuri-English language pair has the least number of sentence pairs (7,419) among all language pairs. Because of this, we first create a balanced data set of 7,000 sentences in total for all the language pairs. We then increment the training data by 2,000 sentence pairs in all language pairs, except Manipuri-English, in three rounds.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 142, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In total, we report translation quality for three types of models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 single models: these are models trained on a single language pair, one for each language pair.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 multilingual models: these are models trained on our data on different types of multilingual data, i.e., one-to-many, many-to-one and many-to-many (Johnson et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 171, |
|
"text": "(Johnson et al., 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 zero-shot models: these are the models for testing zero-shot translation on the language pairs for which no parallel data was seen during training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "An example of a zero-shot model is described below. Suppose that we have trained a model on the following language pairs: Manipuri-English and Assamese-English, in both directions, hence in 4 language directions. The zero-shot model will translate between Manipuri and Assamese in both directions, although no Assamese-Manipuri or Manipuri-Assamese sentence pair has been seen during training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
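
{

"text": "To make this concrete, the following sketch (our own illustrative code, reusing the add_target_token helper sketched above; the data layout is an assumption) assembles the four training directions and shows what a zero-shot query looks like:\n\ndef build_training_examples(parallel):\n    # parallel maps a pair such as ('mni', 'eng') to a list of (src, tgt)\n    # sentence pairs. Both directions of every pair are added, so training\n    # covers mni<->eng and asm<->eng but never mni<->asm.\n    examples = []\n    for (l1, l2), pairs in parallel.items():\n        for s1, s2 in pairs:\n            examples.append((add_target_token(s1, l2), s2))  # l1 -> l2\n            examples.append((add_target_token(s2, l1), s1))  # l2 -> l1\n    return examples\n\n# At test time, a zero-shot Manipuri-to-Assamese query is simply:\n# add_target_token(manipuri_sentence, 'asm')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methodology",

"sec_num": "4"

},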
|
{ |
|
"text": "One of our experiments focuses on zero-shot translation with balanced data set. For that, we test the three possible different combination of language pairs with our data. For one of the three Indian languages, Assamese, Bengali or Hindi, call it X, we build a system to perform zero-shot translation from Manipuri into X by using sentence pairs from Manipuri-English in both directions and X-English, in both directions too.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We change the conditions of the above experiments by increasing the training data with groups of 2,000 sentence pairs in the three Indian language to English language pairs for three rounds, for all of our models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We briefly outline the experimental setup used in all of our experiments in this section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We firstly introduce the three types of models used in our experiments. The single models are trained on a single language pair, the multilingual models are trained on different types of multilingual data and the zero-shot models are trained on language pairs which exclude the language pair to be tested. In a first series of experiments, we measure the translation accuracy of all our models on balanced data sets for all of our language pairs. For that, we randomly select 5,000 sentences for training and 1,000 sentences each for validation and testing from our data set for all the language pairs. Later, we expand our experiments by increasing the training data by 2,000 for all the language pairs excluding Manipuri-English language pair in three rounds.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We train the single models on a single language pair, one for each language pair. There are a total of eight language pairs in our experiments: from each of Assamese (asm), Bengali (ben), Hindi (hin) and Manipuri (mni), into English and vice-versa. We then measure the effects of balanced data set on our single models. Each language pair has 5,000 sentences for training and 1,000 sentences each for validation and testing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Single models", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "We train our multilingual models (Johnson et al., 2017) on different types of configurations. They are listed below.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 55, |
|
"text": "(Johnson et al., 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual models", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "\u2022 One-to-Many: A One-to-Many multilingual model is trained on language pairs that has only one type of source language and different types of target languages. In simple terms, it is a model which translates one source language into many target languages. Because of our data set, our source language is English and the target languages are the Indian languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual models", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "\u2022 Many-to-One: In a Many-to-One multilingual model, only language pairs that have several source languages and only one target language are used for training. This is the other direction than One-to-Many. Again, because of our data set, the source languages are the Indian languages and the target language is English.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual models", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "\u2022 Many-to-Many: Many-to-Many multilingual model is trained on language pairs that have several source languages and several target languages. We train our model on language pairs that have source and target languages as Assamese (asm), Bengali (ben), Hindi (hin), Manipuri (mni) and English (eng) respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual models", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "Lastly, we train zero-shot models for testing zeroshot translation as described in Section 4 on language pairs without parallel data. Our model translates between Manipuri into the other Indian languages, i.e., Assamese, Bengali and Hindi.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Zero-shot models", |
|
"sec_num": "5.1.3" |
|
}, |
|
{ |
|
"text": "Before preprocessing the data, we use Joint Byte-Pair Encoding (Sennrich et al., 2016b) to address the problem of rare words by using sub-word segmentation. We apply Byte-Pair Encoding (BPE) and perform sub-word segmentation on all of our selected data set with 10,000 merge operations so as to obtain a vocabulary representation of all our language pairs. For all of our experiments, we use the OpenNMT-py toolkit (Klein et al., 2017) . We preprocess the training and validation data set for all the language pairs after applying BPE. We train our model on a 2-layered RNN model with a bidirectional RNN as encoder and a simple RNN as decoder. We measure the translation accuracy of all our experiments using BLEU with a confidence at 95 % (Koehn, 2004) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 87, |
|
"text": "(Sennrich et al., 2016b)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 435, |
|
"text": "(Klein et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 741, |
|
"end": 754, |
|
"text": "(Koehn, 2004)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-processing and tools", |
|
"sec_num": "5.1.4" |
|
}, |
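
{

"text": "A minimal sketch of the sub-word segmentation step (our own illustrative code using the subword-nmt package, one common implementation of BPE; the file names are illustrative):\n\nimport codecs\nfrom subword_nmt.learn_bpe import learn_bpe\nfrom subword_nmt.apply_bpe import BPE\n\n# Learn joint BPE codes with 10,000 merge operations on the concatenation\n# of all training sides of all language pairs.\nwith codecs.open('train.all.txt', encoding='utf-8') as infile, \\\\\n     codecs.open('bpe.codes', 'w', encoding='utf-8') as outfile:\n    learn_bpe(infile, outfile, num_symbols=10000)\n\n# Segment a sentence into sub-word units with the learned codes.\nwith codecs.open('bpe.codes', encoding='utf-8') as codes:\n    bpe = BPE(codes)\nsegmented = bpe.process_line('an example sentence with rare words')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pre-processing and tools",

"sec_num": "5.1.4"

},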
|
{ |
|
"text": "There are many RNN architectures available for NMT. We choose the default model provided by OpenNMT-py toolkit (Klein et al., 2017) . It is a seq2seq architecture with attention mechanism (Luong et al., 2015) . In our models, both the encoders and decoders are long short-term memory cells (Hochreiter and Schmidhuber, 1997) . The hyper-parameters are mostly the default ones provided by the toolkit. The exact values for the hyperparameters are listed in Table 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 131, |
|
"text": "(Klein et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 208, |
|
"text": "(Luong et al., 2015)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 324, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 456, |
|
"end": 463, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model configuration", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "It is known that the Transformer architecture (Vaswani et al., 2017) usually leads to better translation accuracy in comparison to the RNN architectures. For instance, Lakew et al. 2018report that their Transformer architecture outperforms the recurrent ones in all their systems. In our settings and with our datasets, this is not the case. For example, in the many-to-one multilingual experiments, the translation accuracy (measured using BLEU with confidence at 95%) for the Transformer architecture lies in the range of 0.9 \u00b1 0.2 to 8.6 \u00b1 1.0, whereas the results with recurrent architecture range from 2.2 \u00b1 0.5 to 9.6 \u00b1 1.1 (see Table 3 ). This justifies why we use the RNN architecture.", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 68, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 635, |
|
"end": 642, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model configuration", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In all of our experiments, the hyper-parameters are uniform throughout all the models. The model is trained on a 2 layered RNN model having layer size of 64 for embedding and 500 for inner layers. The LSTM has encoder type as bidirectional RNN and decoder as a simple RNN. Since our data set is very small, we use a drop-out (Srivastava et al., 2014) rate of 0.3 (Gal and Ghahramani, 2016) . We also use the general typed global attention mechanism onto the models. The models are trained with 10,000 training steps with checkpoints at every 5,000 steps.", |
|
"cite_spans": [ |
|
{ |
|
"start": 325, |
|
"end": 350, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 389, |
|
"text": "(Gal and Ghahramani, 2016)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training settings", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "For optimization of the model during training, we use the Adam (Kingma and Ba, 2015) optimizer with a learning rate of 0.001. The number of steps before dropping the learning rate is set to 50,000 and the decay frequency which is the number of steps at which the learning rate starts to drop at each training step is taken as 10,000.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training settings", |
|
"sec_num": "5.3" |
|
}, |
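
{

"text": "A sketch of the corresponding training invocation (our own reconstruction using OpenNMT-py 1.x-style command-line flags; the data and model paths are illustrative):\n\nimport subprocess\n\n# The flags mirror the hyper-parameters described above: 2 layers, embedding\n# size 64, inner size 500, bidirectional encoder, general global attention,\n# dropout 0.3, Adam with learning rate 0.001, decay schedule (50,000 /\n# 10,000), 10,000 training steps and checkpoints every 5,000 steps.\nsubprocess.run([\n    'onmt_train',\n    '-data', 'data/multilingual',\n    '-save_model', 'models/rnn',\n    '-layers', '2',\n    '-word_vec_size', '64',\n    '-rnn_size', '500',\n    '-encoder_type', 'brnn',\n    '-global_attention', 'general',\n    '-dropout', '0.3',\n    '-optim', 'adam',\n    '-learning_rate', '0.001',\n    '-start_decay_steps', '50000',\n    '-decay_steps', '10000',\n    '-train_steps', '10000',\n    '-save_checkpoint_steps', '5000',\n], check=True)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training settings",

"sec_num": "5.3"

},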
|
{ |
|
"text": "In this particular experimental setting for balanced data set of single and multilingual models, we observe that the multilingual models perform comparatively better than the single model in the case of Indian language-English language pairs, excluding Manipuri to English. See results in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 296, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "As for zero-shot translation, the BLEU scores reported are very low. In Table 4 , the results of the experiments on balanced data are shown under the label Balanced, while the results obtained when increasing the training data by 2,000 sentences for three rounds, are shown under the labels of Round-I, Round-II and Round-III.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 79, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and analysis", |
|
"sec_num": "5.4" |
|
}, |
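
{

"text": "The 95% confidence intervals attached to these BLEU scores can be obtained with bootstrap resampling (Koehn, 2004). A minimal sketch (our own illustrative code; the use of the sacrebleu package as the BLEU implementation is an assumption, as the paper does not name its scorer):\n\nimport random\nimport sacrebleu\n\ndef bleu_with_ci(hyps, refs, n_resamples=1000, seed=0):\n    # Resample test sentences with replacement, recompute BLEU on each\n    # resample, and report the 2.5th and 97.5th percentiles as a 95% CI.\n    rng = random.Random(seed)\n    n = len(hyps)\n    scores = []\n    for _ in range(n_resamples):\n        idx = [rng.randrange(n) for _ in range(n)]\n        scores.append(sacrebleu.corpus_bleu([hyps[i] for i in idx],\n                                            [[refs[i] for i in idx]]).score)\n    scores.sort()\n    return (sacrebleu.corpus_bleu(hyps, [refs]).score,\n            scores[int(0.025 * n_resamples)],\n            scores[int(0.975 * n_resamples)])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results and analysis",

"sec_num": "5.4"

},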
|
{ |
|
"text": "We observe that the translation accuracies in Round-I to Round-III are slightly higher in comparison to the results in Balanced. We also observe that zero-shot translation between Manipuri-Bengali performs comparatively better than the rest, with translation accuracy more than twice that of Manipuri-Assamese and 1.4 times that of Manipuri-Hindi, with statistical significance. Additionally, the scores of Manipuri-Bengali increases by twice in Round-III, with statistical significance,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and analysis", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "This work provided an investigation in the use of zero-shot translation between some Indian languages, in the context of low resource.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Firstly, we studied the influence of the balance in data sets across the considered language pairs. We observed that a multilingual model performs comparatively better than a baseline single model, in terms of BLEU scores. In addition, we observed that, in zero-shot translation, a balanced configuration does not perform well. As observed in other works, the use of NMT on a very small amount of data for training, validation and testing, results in low translation accuracy, because NMT has a steep learning curve with respect to amount of data (Koehn and Knowles, 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 547, |
|
"end": 572, |
|
"text": "(Koehn and Knowles, 2017)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The translation accuracy when incrementing the data size is comparatively better than in the balanced data set settings. We observed a very small increase in BLEU scores in the balanced settings from Round-I to Round-III, although there is no statistically significant difference. In zero-shot translation, Manipuri-Bengali recorded the highest BLEU score among all language pairs, while Manipuri-Assamese recorded the least score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the future, we would like to inspect the possibility of increasing the size of our data by using back-translation. We expect that synthetic data will help our models in improving the translation accuracy (Sennrich et al., 2016a) . We would also like to inspect the use of the unsupervised learning approach with adversarial training to learn a mapping from source to target languages without any parallel data or anchor points (Lample et al., 2018) on our models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 231, |
|
"text": "(Sennrich et al., 2016a)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 451, |
|
"text": "(Lample et al., 2018)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://data.statmt.org/pmindia/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The codes from ISO 639-2 for these languages are as follows: Assamese (asm), Bengali (ben), Hindi (hin), Manipuri (mni) and English (eng)3 http://opus.nlpl.eu/JW300.php", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.pmindia.gov.in/en/ pm-india-language-banner/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Language Pair single multilingual one-to-many many-to-one many-to-many asm-eng 5.3 \u00b1 0.3 -5.9 \u00b1 0.9 6.0 \u00b1 0.9 ben-eng 0.9 \u00b1 0.2 -2.2 \u00b1 0.5 1.8 \u00b1 0.4 hin-eng 4.1 \u00b1 0.5 -4.6 \u00b1 0.6 4.6 \u00b1 0.6 mni-eng 9.7 \u00b1 1.1 -9.6 \u00b1 1.1 8.1 \u00b1 1.0 eng-asm 2.0 \u00b1 0.4 2.4 \u00b1 0.4 -1.7 \u00b1 0.3 eng-ben 1.4 \u00b1 0.4 3.7 \u00b1 0.5 -3.7 \u00b1 0.5 eng-hin 3.6 \u00b1 0.5 3.3 \u00b1 0.5 -3.3 \u00b1 0.5 eng-mni 50.5 \u00b1 0.7 10.3 \u00b1 0.9 -10.6 \u00b1 0.9 Table 3 : Experiment results from balanced data set setting for single models and multilingual models (one-to-many, many-to-one, many-to-many). The translation accuracy is measured with BLEU with a confidence at 95 %. Note: \"-\" represents that there is no experiment conducted for this language pairs on the model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 386, |
|
"end": 393, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "annex", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Balanced Round-I Round-II Round-III mni-asm 0.2 \u00b1 0.1 2.6 \u00b1 0.4 4.6 \u00b1 0.6 6.1 \u00b1 0.9 mni-ben 6.1 \u00b1 0.9 8.3 \u00b1 1.0 10.9 \u00b1 0.9 13.2 \u00b1 1.3 mni-hin 1.3 \u00b1 0.4 5.5 \u00b1 0.3 7.6 \u00b1 0.9 9.7 \u00b1 1.1 Table 4 : Experiment results of the zero-shot translation on balanced data set setting and, Round-I and Round-II and Round-III where the data for training is increased by 2,000 in Assamese to English (asm-eng), Bengali to English (ben-eng) and Hindi to English (hin-eng) language pairs for three rounds. Translation accuracy is measured using BLEU (in the range of 0-100) with confidence at 95 %.in comparison to Balanced. Lastly, scores in Manipuri-Assamese increase by 2 points with statistical significance, progressively from Balanced to Round-III. For Manipuri-Hindi, the scores of Round-III is Balanced multiplied by 7. The observed sentence behaviours of the translated sentences in Balanced in terms of average number of words per sentence for each language pair is half of the length of its reference sentences. In order to understand this characteristic, we looked into the training sentences added in each round. For all the language pairs, the average number of words in a sentence for training sentences before addition is lesser than the length of the test sentences. For example, in Assamese to English, the average number of words in a training sentence is 16 words initially and that of our added sentences for each round is 18 (20 for the test sentences). Thus, the average length of sentences added in each round becomes closer to the length in the test set.Our NMT system did not perform well in Balanced: it was not good at learning short sentences. This could explain why the length of the translated sentences is only half of that of reference sentences with repeated words. For example, there are only 584 unique words out of 14,893 words (4 % unique words) in the translated sentences of Manipuri-Assamese in Balanced.As we progress with the rounds, the translation accuracy increases and the behaviour of the translated sentences changes as well. The average length of sentences becomes closer to the length of the references as the rounds increase; the accuracy increases too. The average sentence length of Manipuri to Bengali is equal to the length of the reference sentences in Round-III (17 words). This language pair gives the highest BLEU scores among all other pairs. Manipuri-Bengali outperforms the rest because of Bengali having lexical influence over Manipuri. It is followed by Manipuri-Hindi, Manipuri-Assamese exhibiting the least score. This may be explained by the fact that they do not influence each other directly: Bengali influences Assamese and Manipuri but Assamese does not influence Manipuri.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 189, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Language Pair Zero-shot", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "JW300: A widecoverage parallel corpus for low-resource languages", |
|
"authors": [ |
|
{ |
|
"first": "Zeljko", |
|
"middle": [], |
|
"last": "Agi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3204--3210", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1310" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeljko Agi\u0107 and Ivan Vuli\u0107. 2019. JW300: A wide- coverage parallel corpus for low-resource languages. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3204-3210, Florence, Italy. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Massively multilingual neural machine translation in the wild: Findings and challenges", |
|
"authors": [ |
|
{ |
|
"first": "Naveen", |
|
"middle": [], |
|
"last": "Arivazhagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Bapna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Dmitry Lepikhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Krikun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naveen Arivazhagan, Ankur Bapna, Orhan Firat, Dmitry Lepikhin, M. Johnson, M. Krikun, M. Chen, Yuan Cao, G. Foster, Colin Cherry, Wolfgang Macherey, Z. Chen, and Y. Wu. 2019. Massively multilingual neural machine translation in the wild: Findings and challenges. ArXiv, abs/1907.05019.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Learning phrase representations using rnn encoderdecoder for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Van Merrienboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, B van Merrienboer, Caglar Gulcehre, F Bougares, H Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using rnn encoder- decoder for statistical machine translation. In Con- ference on Empirical Methods in Natural Language Processing (EMNLP 2014).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Multi-task learning for multiple language translation", |
|
"authors": [ |
|
{ |
|
"first": "Daxiang", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dianhai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1723--1732", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P15-1166" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daxiang Dong, Hua Wu, Wei He, Dianhai Yu, and Haifeng Wang. 2015. Multi-task learning for mul- tiple language translation. In Proceedings of the 53rd Annual Meeting of the Association for Compu- tational Linguistics and the 7th International Joint Conference on Natural Language Processing (Vol- ume 1: Long Papers), pages 1723-1732, Beijing, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Multi-way, multilingual neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baskaran", |
|
"middle": [], |
|
"last": "Sankaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"Yarman" |
|
], |
|
"last": "Fatos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Vural", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Comput. Speech Lang", |
|
"volume": "45", |
|
"issue": "C", |
|
"pages": "236--252", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.csl.2016.10.006" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Orhan Firat, Kyunghyun Cho, Baskaran Sankaran, Fatos T. Yarman Vural, and Yoshua Bengio. 2017. Multi-way, multilingual neural machine translation. Comput. Speech Lang., 45(C):236-252.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Zero-resource translation with multi-lingual neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baskaran", |
|
"middle": [], |
|
"last": "Sankaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Al-Onaizan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fatos", |
|
"middle": [ |
|
"T Yarman" |
|
], |
|
"last": "Vural", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "268--277", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1026" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Orhan Firat, Baskaran Sankaran, Yaser Al-onaizan, Fatos T. Yarman Vural, and Kyunghyun Cho. 2016. Zero-resource translation with multi-lingual neural machine translation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Lan- guage Processing, pages 268-277, Austin, Texas. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Dropout as a bayesian approximation: Representing model uncertainty in deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Yarin", |
|
"middle": [], |
|
"last": "Gal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zoubin", |
|
"middle": [], |
|
"last": "Ghahramani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of Machine Learning Research", |
|
"volume": "48", |
|
"issue": "", |
|
"pages": "1050--1059", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yarin Gal and Zoubin Ghahramani. 2016. Dropout as a bayesian approximation: Representing model uncer- tainty in deep learning. In Proceedings of Machine Learning Research, volume 48, pages 1050-1059, New York, New York, USA. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Universal neural machine translation for extremely low resource languages", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hany", |
|
"middle": [], |
|
"last": "Hassan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Victor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "344--354", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1032" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Hany Hassan, Jacob Devlin, and Victor O.K. Li. 2018. Universal neural machine translation for extremely low resource languages. In Proceedings of the 2018 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Pa- pers), pages 344-354, New Orleans, Louisiana. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Toward multilingual neural machine translation with universal encoder and decoder", |
|
"authors": [ |
|
{ |
|
"first": "Thanh", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Ha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Niehues", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 13th International Workshop on Spoken Language Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thanh Le Ha, Jan Niehues, and Alexander Waibel. 2016. Toward multilingual neural machine transla- tion with universal encoder and decoder. In In Pro- ceedings of the 13th International Workshop on Spo- ken Language Translation (IWSLT 2016), Seattle.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Pmindia -a collection of parallel corpora of languages of india", |
|
"authors": [ |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Faheem", |
|
"middle": [], |
|
"last": "Kirefu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barry Haddow and Faheem Kirefu. 2020. Pmindia -a collection of parallel corpora of languages of india. ArXiv, abs/2001.09907.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural Computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/neco.1997.9.8.1735" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Google's multilingual neural machine translation system: Enabling zero-shot translation", |
|
"authors": [ |
|
{ |
|
"first": "Macduff", |
|
"middle": [], |
|
"last": "Hughes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "339--351", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00065" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: En- abling zero-shot translation. Transactions of the As- sociation for Computational Linguistics, 5:339-351.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Recurrent continuous translation models", |
|
"authors": [ |
|
{ |
|
"first": "Nal", |
|
"middle": [], |
|
"last": "Kalchbrenner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1700--1709", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nal Kalchbrenner and Phil Blunsom. 2013. Recurrent continuous translation models. In Proceedings of the 2013 Conference on Empirical Methods in Natu- ral Language Processing, pages 1700-1709, Seattle, Washington, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Effective cross-lingual transfer of neural machine translation models without shared vocabularies", |
|
"authors": [ |
|
{ |
|
"first": "Yunsu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingbo", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1246--1257", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1120" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunsu Kim, Yingbo Gao, and Hermann Ney. 2019. Effective cross-lingual transfer of neural machine translation models without shared vocabularies. In Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1246- 1257, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "OpenNMT: Opensource toolkit for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuntian", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Senellart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of ACL 2017, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "67--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Klein, Yoon Kim, Yuntian Deng, Jean Senel- lart, and Alexander Rush. 2017. OpenNMT: Open- source toolkit for neural machine translation. In Proceedings of ACL 2017, System Demonstrations, pages 67-72, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Statistical significance tests for machine translation evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "388--395", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2004. Statistical significance tests for machine translation evaluation. In Proceed- ings of the 2004 Conference on Empirical Meth- ods in Natural Language Processing, pages 388- 395, Barcelona, Spain. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Six challenges for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Knowles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Neural Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "28--39", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-3204" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn and Rebecca Knowles. 2017. Six chal- lenges for neural machine translation. In Proceed- ings of the First Workshop on Neural Machine Trans- lation, pages 28-39, Vancouver. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A comparison of transformer and recurrent neural networks on multilingual neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Surafel", |
|
"middle": [ |
|
"Melaku" |
|
], |
|
"last": "Lakew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mauro", |
|
"middle": [], |
|
"last": "Cettolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "641--652", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Surafel Melaku Lakew, Mauro Cettolo, and Marcello Federico. 2018. A comparison of transformer and recurrent neural networks on multilingual neural ma- chine translation. In Proceedings of the 27th Inter- national Conference on Computational Linguistics, pages 641-652, Santa Fe, New Mexico, USA. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Unsupervised machine translation using monolingual corpora only", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Denoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "6th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2018. Unsupervised ma- chine translation using monolingual corpora only. In 6th International Conference on Learning Rep- resentations, ICLR 2018, Vancouver, BC, Canada, April 30 -May 3, 2018, Conference Track Proceed- ings. OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Effective approaches to attention-based neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1412--1421", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D15-1166" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natu- ral Language Processing, pages 1412-1421, Lis- bon, Portugal. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Multilingual neural machine translation involving Indian languages", |
|
"authors": [ |
|
{ |
|
"first": "Pulkit", |
|
"middle": [], |
|
"last": "Madaan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fatiha", |
|
"middle": [], |
|
"last": "Sadat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the WILDRE5-5th Workshop on Indian Language Data: Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "29--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pulkit Madaan and Fatiha Sadat. 2020. Multilin- gual neural machine translation involving Indian lan- guages. In Proceedings of the WILDRE5-5th Work- shop on Indian Language Data: Resources and Eval- uation, pages 29-32, Marseille, France. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Atlas of the world's languages in danger", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Moseley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Nicolas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Moseley and Alexandre Nicolas. 2010. At- las of the world's languages in danger, 3 edition. UNESCO, France.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Improving neural machine translation models with monolingual data", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "86--96", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016a. Improving neural machine translation mod- els with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 86-96, Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016b. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Dropout: A simple way to prevent neural networks from overfitting", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "15", |
|
"issue": "56", |
|
"pages": "1929--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: A simple way to prevent neural networks from overfitting. Journal of Machine Learning Re- search, 15(56):1929-1958.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3104--3112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. In Advances in neural information processing sys- tems, pages 3104-3112.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Parallel data, tools and interfaces in opus", |
|
"authors": [ |
|
{ |
|
"first": "Jorg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jorg Tiedemann. 2012. Parallel data, tools and inter- faces in opus. In Proceedings of the Eight Interna- tional Conference on Language Resources and Eval- uation (LREC'12), Istanbul, Turkey. European Lan- guage Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Multi-source neural translation", |
|
"authors": [ |
|
{ |
|
"first": "Barret", |
|
"middle": [], |
|
"last": "Zoph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--34", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1004" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barret Zoph and Kevin Knight. 2016. Multi-source neural translation. In Proceedings of the 2016 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 30-34, San Diego, Cali- fornia. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"text": "Statistics on the data set used.", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "Parameters used for RNN model. They are mostly from openNMT-py toolkit suggestions.", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |