|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T04:33:44.754788Z" |
|
}, |
|
"title": "Improving NMT via Filtered Back Translation", |
|
"authors": [ |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Jaiswal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TCS Research", |
|
"location": { |
|
"settlement": "New Delhi", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mayur", |
|
"middle": [], |
|
"last": "Patidar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TCS Research", |
|
"location": { |
|
"settlement": "New Delhi", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Surabhi", |
|
"middle": [], |
|
"last": "Kumari", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TCS Research", |
|
"location": { |
|
"settlement": "New Delhi", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Manasi", |
|
"middle": [], |
|
"last": "Patwardhan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TCS Research", |
|
"location": { |
|
"settlement": "New Delhi", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Shirish", |
|
"middle": [], |
|
"last": "Karande", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TCS Research", |
|
"location": { |
|
"settlement": "New Delhi", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Puneet", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TCS Research", |
|
"location": { |
|
"settlement": "New Delhi", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Lovekesh", |
|
"middle": [], |
|
"last": "Vig", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TCS Research", |
|
"location": { |
|
"settlement": "New Delhi", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Document-Level Machine Translation (MT) has become an active research area among the NLP community in recent years. Unlike sentence-level MT, which translates the sentences independently, document-level MT aims to utilize contextual information while translating a given source sentence. This paper demonstrates our submission (Team ID-DEEPNLP) to the Document-Level Translation task organized by WAT 2020 1. This task focuses on translating texts from a business dialog corpus while optionally utilizing the context present in the dialog. In our proposed approach, we utilize publicly available parallel corpus from different domains to train an open domain base NMT model. We then use monolingual target data to create filtered pseudo parallel data and employ Back-Translation to finetune the base model. This is further followed by fine-tuning on the domain-specific corpus. We also ensemble various models to improvise the translation performance. Our best models achieve a BLEU score of 26.59 and 22.83 in an unconstrained setting and 15.10 and 10.91 in the constrained settings for En \u2192 Ja & Ja \u2192 En direction, respectively.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Document-Level Machine Translation (MT) has become an active research area among the NLP community in recent years. Unlike sentence-level MT, which translates the sentences independently, document-level MT aims to utilize contextual information while translating a given source sentence. This paper demonstrates our submission (Team ID-DEEPNLP) to the Document-Level Translation task organized by WAT 2020 1. This task focuses on translating texts from a business dialog corpus while optionally utilizing the context present in the dialog. In our proposed approach, we utilize publicly available parallel corpus from different domains to train an open domain base NMT model. We then use monolingual target data to create filtered pseudo parallel data and employ Back-Translation to finetune the base model. This is further followed by fine-tuning on the domain-specific corpus. We also ensemble various models to improvise the translation performance. Our best models achieve a BLEU score of 26.59 and 22.83 in an unconstrained setting and 15.10 and 10.91 in the constrained settings for En \u2192 Ja & Ja \u2192 En direction, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Neural Machine Translation (Bahdanau et al., 2015; Vaswani et al., 2017a) has performed impressively in recent years, especially for high resource language pairs. However, one of the shortcomings while translating texts in the form of a paragraph or a document is that the inter-relations among sentences are ignored and the sentences are translated independently. Document Level MT (Maruf et al., 2020; Zhang and Zong, 2020; Kim et al., 2019b) aims to utilize these inter-sentential context information to deal with context-dependent phenomena such as coreference, lexical cohesion, and consistency, lexical disambiguation, etc. (Voita et al., 2019; Lopes et al., 2020) The meaning of a translated sentence can deviate from its originality when treated independently. WAT 2020's (Nakazawa et al., 2020 ) Document-level Business Scene Dialogue (BSD) Translation sub-task aims to foster research in the area of document-level MT. To tackle this task, we perform the following steps. Firstly, we gather several publicly available English Japanese corpus and combine them to train an open domain base model. Then, we utilize the monolingual corpus in the target language to create the pseudo parallel corpus. Since the generated pseudo parallel corpus might consist of noisy translated sentences, we use a sentence-level similarity-based filtration technique to filter out such pairs. We then fine-tune the base model on the filtered data followed by fine-tuning on in-domain parallel BSD 2 data. We also utilize checkpoint ensembles to further improve the translation performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 50, |
|
"text": "(Bahdanau et al., 2015;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 51, |
|
"end": 73, |
|
"text": "Vaswani et al., 2017a)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 383, |
|
"end": 403, |
|
"text": "(Maruf et al., 2020;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 404, |
|
"end": 425, |
|
"text": "Zhang and Zong, 2020;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 444, |
|
"text": "Kim et al., 2019b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 630, |
|
"end": 650, |
|
"text": "(Voita et al., 2019;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 670, |
|
"text": "Lopes et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 780, |
|
"end": 802, |
|
"text": "(Nakazawa et al., 2020", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This task aims to translate all the sentences in the BSD test file from Ja \u2192 En and vice-versa. Participants could participate either in the constrained setting in which only the official BSD corpus needs to be used or in an unconstrained setting where other resources such as parallel corpora, monolingual corpora, and parallel dictionaries in addition to the official corpora could be utilized. We participate in both settings. BLEU (Papineni et al., 2002) , RIBES 3 and AMFM (Banchs et al., 2015) are used as the official automatic evaluation metrics and are calculated on the tokenized version of translated and reference sentences using different tokenizers such as Juman, KyTea, MeCab, & Moses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 435, |
|
"end": 458, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 499, |
|
"text": "(Banchs et al., 2015)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "There are two major directions in MT, which has attracted a lot of attention from the research community in recent years -Document Level MT & MT on low resource language pairs. Numerous works have been proposed to tackle document-level MT (Maruf et al., 2019; Miculicich Werlen et al., 2018) . This area's work involves utilizing contexts on source, target, or both side and designing architectures using either the single (Ma et al., 2020) or additional encoder (Zhang et al., 2018; to handle contextual information. Some work in this area also tries to analyze the contextual errors (Kim et al., 2019a) . The work related to low resource language pairs involves making use of monolingual data to create pseudo parallel corpus using back translation (Sennrich et al., 2016) , iterative back translation (Hoang et al., 2018) & filtered back translation techniques (Junczys-Dowmunt, 2018; Dou et al., 2020) , etc. For filtering noisy pairs, Imankulova et al. (2017) uses the Round Trip BLEU score between true and synthetic sentences. Wang et al. (2019) propose dynamic domain-data selection along with dynamic clean-data selection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 259, |
|
"text": "(Maruf et al., 2019;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 291, |
|
"text": "Miculicich Werlen et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 440, |
|
"text": "(Ma et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 463, |
|
"end": 483, |
|
"text": "(Zhang et al., 2018;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 604, |
|
"text": "(Kim et al., 2019a)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 751, |
|
"end": 774, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 864, |
|
"end": 887, |
|
"text": "(Junczys-Dowmunt, 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 888, |
|
"end": 905, |
|
"text": "Dou et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 940, |
|
"end": 964, |
|
"text": "Imankulova et al. (2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1034, |
|
"end": 1052, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We describe our proposed approach in this section. For the unconstrained setting, we first create BASE models by training N M T s\u2192t and N M T t\u2192s on the open domain dataset. Then, we use these trained", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "N M T s\u2192t & N M T t\u2192s models to translate the monolingual data M s & M t to M t & M s re- spectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We then utilize the pseudo parallel data M t and M s along with equal amount of true parallel data to fine-tune N M T t\u2192s model. Similarly, we use the pseudo parallel data M s and M t along with equal amount of true parallel data to fine-tune N M T s\u2192t model. This results in the creation of the back-translated (BT) models. In other settings, instead of utilizing the entire pseudo parallel data, we apply the filtering technique described below on these data to filter out noisy pairs. Then we use these filtered pairs along with an equal amount of true parallel data to fine-tune the N M T t\u2192s & N M T s\u2192t models. This results in the creation of the filtered back-translated (FBT) models. We further fine-tune BT as well as FBT models on the BSD corpus. For the constrained setting, we train N M T s\u2192t and N M T t\u2192s models directly on the BSD corpus. We also experiment with fine-tuning mBART (Liu et al., 2020 ) model on the BSD corpus. We finally build several ensembles by averaging checkpoints of different trained models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 896, |
|
"end": 913, |
|
"text": "(Liu et al., 2020", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Filtering Technique:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We apply a naive filtering model based on sentence similarity to filter out noisy pseudo parallel data. Given a monolingual source sentence S, we obtain the corresponding translated sentence T using the trained N M T s\u2192t model. We then apply MUSE (Multilingual Universal Sentence Encoder) (Yang et al., 2019) to obtain the sentence embeddings of S and T. Then cosine similarity is calculated on the obtained embeddings of S and T, and if the cosine score is below a certain threshold, we treat this pair as noisy. The threshold value is decided based on the cosine score on the entire monolingual data and its corresponding generated translations. We also utilize this filtering strategy to sample sentence pairs from the true parallel data. For this, we sort the entire true parallel data in decreasing order of similarity scores.", |
|
"cite_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 308, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Then we remove pairs that contain the text in the same language in the source and target side using Langid (Lui and Baldwin, 2012) library. We also remove pairs where the same text is present on the source and target side. Finally, we return the top n sentence pairs from the above data where n is the number of samples required from the true parallel data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 130, |
|
"text": "(Lui and Baldwin, 2012)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We collect and merge several publicly available parallel corpus for training the BASE models on the open domain data. We use datasets from sev- , TED Talks (Hochreiter and Schmidhuber, 1997) and MTNT (Machine Translation of Noisy Text) parallel corpus (Michel and Neubig, 2018) . While combining the datasets, we follow the train, validation, and test set as provided in the respective corpus and use it in a similar fashion in our combined dataset. We sample 3 million monolingual data from News Crawl 4 dataset to create pseudo parallel corpus. Along with these pseudo parallel data, we randomly sample the same amount of true parallel data from the open domain dataset. We combine and shuffle both the pseudo parallel and true parallel data. Finally, we utilize the BSD corpus provided by WAT 2020. This corpus is manually created and consists of Japanese-English business conversations. We use the provided training, development, and evaluation splits, which are described in Table 2 . We use different preprocessing rules for each translation direction based on our initial experimentation results as well as the findings from the literature. For Ja-En, we train & apply sentencepiece (Kudo and Richardson, 2018) model to tokenize the raw text into subwords with the vocabulary size of 32,000 for each language. For En-Ja, we first tokenize the raw text by KyTea and the Moses tok-enizer for Japanese and English, respectively. We also use Moses toolkits to truecase English words. We then further train & apply sentencepiece model to tokenize these words into subwords with the vocabulary size of 32,000 for each language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 190, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 277, |
|
"text": "(Michel and Neubig, 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1190, |
|
"end": 1217, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 980, |
|
"end": 987, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Preparation & Preprocessing", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Here, we describe a detailed setup of our experiments in both the constrained and unconstrained settings. For the unconstrained setting, we utilize Transformer-base (Vaswani et al., 2017b) model for training the open domain BASE models. The encoder and decoder consist of 6 layers, 8 attention heads, and the hidden size is kept to 512. We use Adam optimizer with an initial learning rate of 0.001 and dropout regularization, whose value is fixed at 0.3. We use Fairseq (Ott et al., 2019) to implement all our experiments. All the models are trained until the convergence with patience of five. Once the BASE models are trained, we use monolingual data to create pseudo parallel data and train the BT models. For filtering based on the sentence similarity, we use the MUSE model from the TensorFlow Hub library to obtain the sentence embeddings. For the constrained setting, we experiment with a Transformer-base model with two as well as three encoder and decoder layers for training on BSD corpus. We also experiment with fine-tuning the mBART model on the BSD corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 188, |
|
"text": "(Vaswani et al., 2017b)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 488, |
|
"text": "(Ott et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "This section discusses the results of our different experiments on both constrained and unconstrained settings. For the unconstrained setting, we first summarize the results of the BASE model, BT model, and the FBT model in both directions on four different publicly available test sets, including the BSD corpus in Table 3 . From the table, we can observe that performing BT as well as FBT helps in improvising the BLEU score of the BASE model. We use the BLEU score to compare the BASE, BT, FBT and the ensemble models which are fine-tuned on the BSD corpus. We report the results using different tokenizers in each direction.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 323, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "LP \u2192 En-Ja Ja-En Constrained 2.60 2.40 Unconstrained 4.13 4.10 Table 6 : We report the Human evaluation result of the Pairwise Crowdsourcing by WAT2020. This was evaluated by 5 different workers, and the final decision is made by the voting of the judgements.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 70, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "So, in the zero-shot setting where none of the BSD data is used for training, we are able to obtain a BLEU score of 18.9 and 16.1 in En \u2192 Ja & Ja \u2192 En directions, respectively. We use KyTea as the tokenizer for the Japanese sentences in the results mentioned in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 262, |
|
"end": 269, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "For the constrained setting, Table 4 presents the overall results. For the En \u2192 Ja translation, the BLEU score using different Japanese tokenizers such as juman, kytea, and mecab are reported. For the Ja \u2192 En direction, moses tokenizer is used for the evaluation. Although the ensemble model gave us better performance compared to the single model alone, but it is the mBART model whose fine-tuning on BSD corpus surpasses all other models by a large margin in both directions. Table 5 presents the unconstrained setting results obtained by fine-tuning the BASE, BT and FBT models on the BSD corpus. It also reports the results of ensembles formed by using different models. We can observe that the ensemble model comprising of fine-tuning BASE and FBT models gives us the best performance for the En \u2192 Ja direction, whereas in the case of Ja \u2192 En, ensemble model comprising of fine-tuning BASE and BT models achieves the highest BLEU score. Table 6 reports the human evaluation results in both the settings.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 36, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 485, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 942, |
|
"end": 949, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We experimented with a variety of techniques in both constrained & unconstrained settings. For the constrained setting, fine-tuning mBART on the BSD corpus gave the best translation performance in both directions. Thus, mBART can be fine-tuned for MT tasks, especially for low resource language pairs. For the unconstrained scenario, the models trained & fine-tuned using the pseudo-parallel corpus showed the best overall translation performance. We also showed that by using a simple ensemble technique of averaging different model checkpoints, the translation performance could be further improvised.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://github.com/tsuruoka-lab/BSD 3 http://www.kecl.ntt.co.jp/icl/lirg/ ribes/index.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.statmt.org/wmt20/ translation-task.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Adequacy-fluency metrics: Evaluating mt in the continuous space model framework", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Banchs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Haro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "IEEE/ACM Transactions on Audio, Speech, and Language Processing", |
|
"volume": "23", |
|
"issue": "3", |
|
"pages": "472--482", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TASLP.2015.2405751" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. E. Banchs, L. F. D'Haro, and H. Li. 2015. Ade- quacy-fluency metrics: Evaluating mt in the contin- uous space model framework. IEEE/ACM Transac- tions on Audio, Speech, and Language Processing, 23(3):472-482.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Dynamic data selection and weighting for iterative back-translation", |
|
"authors": [ |
|
{ |
|
"first": "Zi-Yi", |
|
"middle": [], |
|
"last": "Dou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonios", |
|
"middle": [], |
|
"last": "Anastasopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zi-Yi Dou, Antonios Anastasopoulos, and Graham Neubig. 2020. Dynamic data selection and weight- ing for iterative back-translation.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Iterative backtranslation for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Duy", |
|
"middle": [], |
|
"last": "Vu Cong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2nd Workshop on Neural Machine Translation and Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "18--24", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-2703" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vu Cong Duy Hoang, Philipp Koehn, Gholamreza Haffari, and Trevor Cohn. 2018. Iterative back- translation for neural machine translation. In Pro- ceedings of the 2nd Workshop on Neural Machine Translation and Generation, pages 18-24, Mel- bourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1735--80", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, pages 1735-80.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Improving low-resource neural machine translation with filtered pseudo-parallel corpus", |
|
"authors": [ |
|
{ |
|
"first": "Aizhan", |
|
"middle": [], |
|
"last": "Imankulova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takayuki", |
|
"middle": [], |
|
"last": "Sato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mamoru", |
|
"middle": [], |
|
"last": "Komachi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 4th Workshop on Asian Translation (WAT2017)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "70--78", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aizhan Imankulova, Takayuki Sato, and Mamoru Ko- machi. 2017. Improving low-resource neural ma- chine translation with filtered pseudo-parallel cor- pus. In Proceedings of the 4th Workshop on Asian Translation (WAT2017), pages 70-78, Taipei, Tai- wan. Asian Federation of Natural Language Process- ing.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Dual conditional cross-entropy filtering of noisy parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Shared Task Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "888--895", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6478" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt. 2018. Dual conditional cross-entropy filtering of noisy parallel corpora. In Proceedings of the Third Conference on Machine Translation: Shared Task Papers, pages 888-895, Belgium, Brussels. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "When and why is document-level context useful in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yunsu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duc", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "24--34", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-6503" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunsu Kim, Duc Tran, and Hermann Ney. 2019a. When and why is document-level context useful in neural machine translation? pages 24-34.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "When and why is document-level context useful in neural machine translation?", |
|
"authors": [ |
|
{ |
|
"first": "Yunsu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thanh", |
|
"middle": [], |
|
"last": "Duc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunsu Kim, Duc Thanh Tran, and Hermann Ney. 2019b. When and why is document-level context useful in neural machine translation?", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. Sentencepiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. CoRR, abs/1808.06226.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Multilingual denoising pre-training for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Document-level neural MT: A systematic comparison", |
|
"authors": [ |
|
{ |
|
"first": "Ant\u00f3nio", |
|
"middle": [], |
|
"last": "Lopes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Amin Farajian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Bawden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [ |
|
"F T" |
|
], |
|
"last": "Martins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "225--234", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ant\u00f3nio Lopes, M. Amin Farajian, Rachel Bawden, Michael Zhang, and Andr\u00e9 F. T. Martins. 2020. Document-level neural MT: A systematic compari- son. In Proceedings of the 22nd Annual Conference of the European Association for Machine Transla- tion, pages 225-234, Lisboa, Portugal. European As- sociation for Machine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Langid.py: An off-the-shelf language identification tool", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Lui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Lui and Timothy Baldwin. 2012. Langid.py: An off-the-shelf language identification tool. pages 25-30.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A simple and effective unified encoder for document-level machine translation",
|
"authors": [ |
|
{ |
|
"first": "Shuming", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongdong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3505--3511", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.321" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuming Ma, Dongdong Zhang, and Ming Zhou. 2020. A simple and effective unified encoder for document- level machine translation. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 3505-3511, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Selective attention for context-aware neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Sameen", |
|
"middle": [], |
|
"last": "Maruf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Andr\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3092--3102", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1313" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameen Maruf, Andr\u00e9 F. T. Martins, and Gholamreza Haffari. 2019. Selective attention for context-aware neural machine translation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long and Short Papers), pages 3092-3102, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A survey on document-level neural machine translation: Methods and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Sameen", |
|
"middle": [], |
|
"last": "Maruf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fahimeh", |
|
"middle": [], |
|
"last": "Saleh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sameen Maruf, Fahimeh Saleh, and Gholamreza Haf- fari. 2020. A survey on document-level neural ma- chine translation: Methods and evaluation.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "MTNT: A testbed for machine translation of noisy text", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Michel and Graham Neubig. 2018. MTNT: A testbed for machine translation of noisy text. CoRR, abs/1809.00388.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Document-level neural machine translation with hierarchical attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Lesly", |
|
"middle": [], |
|
"last": "Miculicich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhananjay", |
|
"middle": [], |
|
"last": "Ram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Pappas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2947--2954", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1325" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lesly Miculicich, Dhananjay Ram, Nikolaos Pappas, and James Henderson. 2018. Document-level neu- ral machine translation with hierarchical attention networks. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 2947-2954, Brussels, Belgium. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Document-level neural machine translation with hierarchical attention networks",
|
"authors": [ |
|
{ |
|
"first": "Lesly", |
|
"middle": [], |
|
"last": "Miculicich Werlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhananjay", |
|
"middle": [], |
|
"last": "Ram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Pappas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lesly Miculicich Werlen, Dhananjay Ram, Nikolaos Pappas, and James Henderson. 2018. Document- level neural machine translation with hierarchical at- tention networks.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Overview of the 7th workshop on Asian translation", |
|
"authors": [ |
|
{ |
|
"first": "Toshiaki", |
|
"middle": [], |
|
"last": "Nakazawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideki", |
|
"middle": [], |
|
"last": "Nakayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenchen", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raj", |
|
"middle": [], |
|
"last": "Dabre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideya", |
|
"middle": [], |
|
"last": "Mino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isao", |
|
"middle": [], |
|
"last": "Goto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Win", |
|
"middle": [ |
|
"Pa" |
|
], |
|
"last": "Pa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Kunchukuttan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shantipriya", |
|
"middle": [], |
|
"last": "Parida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 7th Workshop on Asian Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Toshiaki Nakazawa, Hideki Nakayama, Chenchen Ding, Raj Dabre, Hideya Mino, Isao Goto, Win Pa Pa, Anoop Kunchukuttan, Shantipriya Parida, Ond\u0159ej Bojar, and Sadao Kurohashi. 2020. Overview of the 7th workshop on Asian transla- tion. In Proceedings of the 7th Workshop on Asian Translation, Suzhou, China. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The Kyoto free translation task", |
|
"authors": [ |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Graham Neubig. 2011. The Kyoto free translation task. http://www.phontron.com/kftt.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "fairseq: A fast, extensible toolkit for sequence modeling", |
|
"authors": [ |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexei", |
|
"middle": [], |
|
"last": "Baevski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of NAACL-HLT 2019: Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of NAACL-HLT 2019: Demonstrations.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "JESC: japanese-english subtitle corpus", |
|
"authors": [ |
|
{ |
|
"first": "Reid", |
|
"middle": [], |
|
"last": "Pryzant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongjoo", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denny", |
|
"middle": [], |
|
"last": "Britz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reid Pryzant, Yongjoo Chung, Dan Jurafsky, and Denny Britz. 2017. JESC: japanese-english subtitle corpus. CoRR, abs/1710.10639.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Wikimatrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuo", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyu", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong, and Francisco Guzm\u00e1n. 2019. Wikimatrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia. CoRR, abs/1907.05791.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Improving neural machine translation models with monolingual data", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Improving neural machine translation models with monolingual data.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017a. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.",
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaiser", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS'17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6000--6010", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017b. Atten- tion is all you need. In Proceedings of the 31st Inter- national Conference on Neural Information Process- ing Systems, NIPS'17, page 6000-6010, Red Hook, NY, USA. Curran Associates Inc.",
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "When a good translation is wrong in context: Context-aware machine translation improves on deixis, ellipsis, and lexical cohesion", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Voita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Voita, Rico Sennrich, and Ivan Titov. 2019. When a good translation is wrong in context: Context-aware machine translation improves on deixis, ellipsis, and lexical cohesion. CoRR, abs/1905.05979.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Dynamically composing domain-data selection with clean-data selection by \"co-curricular learning\" for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isaac", |
|
"middle": [], |
|
"last": "Caswell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ciprian", |
|
"middle": [], |
|
"last": "Chelba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1282--1292", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1123" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Wang, Isaac Caswell, and Ciprian Chelba. 2019. Dynamically composing domain-data selection with clean-data selection by \"co-curricular learning\" for neural machine translation. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics, pages 1282-1292, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Multilingual universal sentence encoder for semantic retrieval",
|
"authors": [ |
|
{ |
|
"first": "Yinfei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amin", |
|
"middle": [], |
|
"last": "Ahmad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandy", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jax", |
|
"middle": [], |
|
"last": "Law", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Constant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gustavo", |
|
"middle": [ |
|
"Hern\u00e1ndez"
|
], |
|
"last": "Abrego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019,
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinfei Yang, Daniel Cer, Amin Ahmad, Mandy Guo, Jax Law, Noah Constant, Gustavo Hern\u00e1ndez Abrego, Steve Yuan, Chris Tar, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil. 2019. Multilingual universal sentence encoder for semantic retrieval. CoRR, abs/1907.04307.",
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Improving the transformer translation model with document-level context", |
|
"authors": [ |
|
{ |
|
"first": "Jiacheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huanbo", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feifei", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1049" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiacheng Zhang, Huanbo Luan, Maosong Sun, Feifei Zhai, Jingfang Xu, Min Zhang, and Yang Liu. 2018. Improving the transformer translation model with document-level context. In Proceedings of the 2018", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Conference on Empirical Methods in Natural Language Processing", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "533--542", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Conference on Empirical Methods in Natural Lan- guage Processing, pages 533-542, Brussels, Bel- gium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Neural machine translation: Challenges, progress and future", |
|
"authors": [ |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqing", |
|
"middle": [], |
|
"last": "Zong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiajun Zhang and Chengqing Zong. 2020. Neural ma- chine translation: Challenges, progress and future.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"text": "Open Domain Corpus Statistics", |
|
"num": null, |
|
"content": "<table><tr><td/><td colspan=\"3\">Training Development Test</td></tr><tr><td>Sentences</td><td>20,000</td><td>2,051</td><td>2,120</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"text": "", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"text": "We use the BLEU score to compare the BASE, BT, and FBT models trained on the open domain corpus to evaluate on different publicly available test sets including the BSD corpus.", |
|
"num": null, |
|
"content": "<table><tr><td>eral domains such as news, movie, Wikipedia ar-</td></tr><tr><td>ticles, etc., so that the BASE model is domain ag-</td></tr><tr><td>nostic. Table 1 presents the number of sentences</td></tr><tr><td>in each such parallel corpora. The final corpus</td></tr><tr><td>consists of around 8.3 million sentence pairs for</td></tr><tr><td>training, 10K for validation and 7K for test set</td></tr><tr><td>and is formed by combining following datasets</td></tr><tr><td>-KFTT (Kyoto Free Translation Task) (Neubig,</td></tr><tr><td>2011), JESC (Japanese-English Subtitle Corpus)</td></tr><tr><td>(Pryzant et al., 2017), Japanese-English Legal Par-</td></tr><tr><td>allel Corpus (Neubig, 2011), WikiMatrix (Schwenk</td></tr><tr><td>et al., 2019), News Commentary 4 , Wiki Titles v2 4</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"text": "Constrained Setting: We use the BLEU score to compare the 2 & 3 layers Transformer (TRF) models trained on BSD corpus, as well as the ensemble model and the fine-tuned mBART model.", |
|
"num": null, |
|
"content": "<table><tr><td>LP \u2192</td><td/><td>En-Ja</td><td>Ja-En</td></tr><tr><td>Tokenizer \u2192</td><td colspan=\"3\">juman kytea mecab moses</td></tr><tr><td>Models \u2193</td><td/><td/><td/></tr><tr><td>BASE FT</td><td>18.76</td><td>25.86 20.19</td><td>21.74</td></tr><tr><td>BT FT</td><td>18.99</td><td>25.90 20.54</td><td>21.95</td></tr><tr><td>FBT FT</td><td>17.85</td><td>24.72 19.30</td><td>21.67</td></tr><tr><td>BASE FT + BT FT</td><td>19.20</td><td>26.27 20.77</td><td>22.83</td></tr><tr><td colspan=\"2\">BASE FT + FBT FT 19.39</td><td>26.59 20.95</td><td>22.75</td></tr></table>", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"text": "Unconstrained Setting:", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |