|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:09:37.008298Z" |
|
}, |
|
"title": "ICT's System for AutoSimTrans 2021: Robust Char-Level Simultaneous Translation", |
|
"authors": [ |
|
{ |
|
"first": "Shaolei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "ICT/CAS", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "ICT/CAS", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Simultaneous translation (ST) outputs the translation simultaneously while reading the input sentence, which is an important component of simultaneous interpretation. In this paper, we describe our submitted ST system, which won the first place in the streaming transcription input track of the Chinese-English translation task of AutoSimTrans 2021. Aiming at the robustness of ST, we first propose char-level simultaneous translation and applied wait-k policy on it. Meanwhile, we apply two data processing methods and combine two training methods for domain adaptation. Our method enhance the ST model with stronger robustness and domain adaptability. Experiments on streaming transcription show that our method outperforms the baseline at all latency, especially at low latency, the proposed method improves about 6 BLEU. Besides, ablation studies we conduct verify the effectiveness of each module in the proposed method.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Simultaneous translation (ST) outputs the translation simultaneously while reading the input sentence, which is an important component of simultaneous interpretation. In this paper, we describe our submitted ST system, which won the first place in the streaming transcription input track of the Chinese-English translation task of AutoSimTrans 2021. Aiming at the robustness of ST, we first propose char-level simultaneous translation and applied wait-k policy on it. Meanwhile, we apply two data processing methods and combine two training methods for domain adaptation. Our method enhance the ST model with stronger robustness and domain adaptability. Experiments on streaming transcription show that our method outperforms the baseline at all latency, especially at low latency, the proposed method improves about 6 BLEU. Besides, ablation studies we conduct verify the effectiveness of each module in the proposed method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automatic simultaneous translation (ST) (Cho and Esipova, 2016; Gu et al., 2017; Ma et al., 2019) , a task in machine translation (MT), aims to output the target translation while reading the source sentence. The standard machine translation is a full-sentence MT, which waits for the complete source input and then starts translation. The huge latency caused by full-sentence MT is unacceptable in many realtime scenarios. On the contrary, ST is widely used in real simultaneous speech translation scenarios, such as simultaneous interpretation, synchronized subtitles, and live broadcasting.", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 63, |
|
"text": "(Cho and Esipova, 2016;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 64, |
|
"end": 80, |
|
"text": "Gu et al., 2017;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 81, |
|
"end": 97, |
|
"text": "Ma et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous methods (Ma et al., 2019; Arivazhagan et al., 2019) for ST are all evaluated on the existing full-sentence MT parallel corpus, ignoring the real speech translation scenario. In the real scene, the paradigm of simultaneous interpretation is Automatic Speech Recognition (ASR) \u2192 simultaneous translation (ST) \u2192 Text-to-Speech Synthesis (TTS), in which these three parts are all carried out simultaneously. As a downstream task of simultaneous ASR, the input of ST is always not exactly correct and in the spoken language domain. Thus, robustness and domain adaptability become two challenges for the ST system.", |
|
"cite_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 34, |
|
"text": "(Ma et al., 2019;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 35, |
|
"end": 60, |
|
"text": "Arivazhagan et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For robustness, since the input of the ST system is ASR result (streaming transcription), which is incremental and may be unsegmented or incorrectly segmented, the subword-level segmentation result (Ma et al., 2019) of the streaming transcription seriously affect the ST result. Existing methods (Li et al., 2020) often remove the last token after segmentation to prevent it from being incomplete, which leads to a considerable increase in latency. Table 1 shows an example of the tokenization result of the streaming transcription input with different methods. In steps 4-7 of standard wait-2, the input prefix is different from its previous step, while the previous output prefix is not allowed to be modified in ST, which leads to serious translation errors. Although removing the last token improves the robustness, there is no new input in many consecutive steps, which greatly increases the latency.", |
|
"cite_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 215, |
|
"text": "(Ma et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 313, |
|
"text": "(Li et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 449, |
|
"end": 456, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For domain adaptability, the existing spoken language domain corpus is lacking, while the general domain corpus for MT and the spoken language domain corpus for ST are quite different in terms of word order, punctuation and modal particles, so ST needs to efficiently complete the domain adaption.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In our system, we propose a Char-Level Wait-k Policy for simultaneous translation, which is more robust with streaming transcription input. Besides, we apply data augmentation and combine two training methods to train the model to complete domain adaptation. Specifically, the source of the char-level wait-k policy is a character sequence segmented according to characters, and the target still maintains subword-level segmentation and BPE operations (Sennrich et al., 2016) . When decoding, Table 1 : An example of the tokenization result of standard wait-k, standard wait-k+remove last token and charlevel wait-k, when dealing with streaming transcription input (take k = 2 as an example). Red mark: the source prefix changes during streaming input. Green mark: no input in consecutive steps since the last token is removed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 452, |
|
"end": 475, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 493, |
|
"end": 500, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Streaming Transcription Standard Wait-2 Standard Wait-2 Remove Last Token Char-Level Wait-2 (Ours) \u4ed6\u662f\u7814\u7a76\u751f\u7269\u7684 \u4ed6\u662f\u7814\u7a76\u751f\u7269\u7684 \u4ed6 / \u662f / \u4ed6 / \u4ed6 / \u662f / \u4ed6\u662f\u7814\u7a76\u751f\u7269\u7684 \u4ed6 / \u662f / \u7814 / \u4ed6 / \u662f / Delay \u4ed6 / \u662f / \u7814 / \u4ed6\u662f\u7814\u7a76\u751f\u7269\u7684 \u4ed6 / \u662f / \u7814\u7a76 / \u4ed6 / \u662f / Delay \u4ed6 / \u662f / \u7814 / \u7a76 / \u4ed6\u662f\u7814\u7a76\u751f\u7269\u7684 \u4ed6 / \u662f / \u7814\u7a76\u751f / \u4ed6 / \u662f / Delay \u4ed6 / \u662f / \u7814 / \u7a76 / \u751f / \u4ed6\u662f\u7814\u7a76\u751f\u7269\u7684 \u4ed6 / \u662f / \u7814\u7a76 / \u751f\u7269 / \u4ed6 / \u662f / \u7814\u7a76 / \u4ed6 / \u662f / \u7814 / \u7a76 / \u751f / \u7269 / \u4ed6\u662f\u7814\u7a76\u751f\u7269\u7684 \u4ed6 / \u662f / \u7814\u7a76 / \u751f\u7269 / \u7684 / \u4ed6 / \u662f / \u7814\u7a76 / \u751f\u7269 / \u7684 / \u4ed6 / \u662f / \u7814 / \u7a76 / \u751f / \u7269 / \u7684 /", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tokenization of Streaming Transcription Input", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "the char-level wait-k policy first waits for k source characters, then alternately reads a character, and outputs a target subword. Table 1 shows the tokenization results of the char-level wait-k policy, which not only guarantees the stability of the input prefix but also avoids unnecessary latency. To adapt to the spoken language domain, we first pretrain an ST model on the general domain corpus and perform fine-tuning on the spoken language domain corpus. To improve the effect and efficiency of domain adaptation, we carry out data augmentation on both the general domain corpus and spoken language domain corpus and combine two different training methods for training.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 139, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tokenization of Streaming Transcription Input", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the streaming transcription track for the Chinese \u2192 English translation task of AutoSimTrans 2021, we evaluate the proposed method on the real speech corpus (Zhang et al., 2021) . Our method exceeds the baseline model at all latency and performs more prominently at lower latency.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 180, |
|
"text": "(Zhang et al., 2021)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tokenization of Streaming Transcription Input", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our contributions can be summarized as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tokenization of Streaming Transcription Input", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 To our best knowledge, we are the first to propose char-level simultaneous translation, which is more robust when dealing with real streaming input.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tokenization of Streaming Transcription Input", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 We apply data augmentation and incorporate two training methods, which effectively improve the domain adaptation and overcome the shortage of spoken language corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tokenization of Streaming Transcription Input", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We participated in the streaming transcription input track of the Chinese-English translation task of AutoSimTrans 2021 1 . An example of the task is shown in Table 2 . Streaming transcription is manually transcribed without word segmentation. Between each step, the source input adds one more character. The task applies AL and BLEU respectively to evaluate the latency and translation quality of the submitted system.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 166, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our system is based on a variant of wait-k policy (Ma et al., 2019) , so we first briefly introduce waitk policy and its training method. Wait-k policy refers to waiting for k source tokens first, and then reading and writing alternately, i.e., the output always delays k tokens after the input. As shown by 'standard wait-k policy' in Figure 1 , if k = 2, the first target token was output after reading 2 source tokens, and then output a target token as soon as a source token is read.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 67, |
|
"text": "(Ma et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 336, |
|
"end": 344, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Define g (t) as a monotonic non-decreasing function of t, which represents the number of source tokens read in when outputting the target token y t . For the wait-k policy, g (t) is calculated as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "g (t) = min {k + t \u2212 1, |x|} , t = 1, 2, \u2022 \u2022 \u2022 (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where x is the input subword sequence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Wait-k policy is trained with \"prefix-to-prefix\" framework. In \"prefix-to-prefix\" framework, when generating the t th target word, the source tokens participating in encoder is limited to less than g (t).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To improve the robustness and domain adaptability of ST, we enhance our system from read / write policy, data processing and training methods respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To enhance the robustness of dealing with streaming transcription, we first proposed char-level simultaneous translation and applied the wait-k policy on it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Char-Level Wait-k Policy", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Character-level neural machine translation (Ling et al., 2015; Lee et al., 2017; Cherry et al., 2018; Gao et al., 2020) tokenizes the source sentence and target sentence according to characters, thereby gaining advantages over subword-level neural machine translation in some specific aspects, such as avoiding out-of-vocabulary problems (Passban et al., 2018) and errors caused by subword-level segmentation (Tang et al., 2020) . In terms of translation quality, the character-level MT is still difficult to compare with the subword-level MT. An important reason is that only one wrong generated character will directly cause the entire target word wrong (Sennrich, 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 62, |
|
"text": "(Ling et al., 2015;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 63, |
|
"end": 80, |
|
"text": "Lee et al., 2017;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 81, |
|
"end": 101, |
|
"text": "Cherry et al., 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 119, |
|
"text": "Gao et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 360, |
|
"text": "(Passban et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 428, |
|
"text": "(Tang et al., 2020)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 656, |
|
"end": 672, |
|
"text": "(Sennrich, 2017)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Char-Level Simultaneous Translation", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "To improve the robustness of the ST system when dealing with unsegmented incremental input, while avoiding the performance degradation caused by character-level MT, we propose char-level simultaneous translation, which is more suitable for streaming input. The framework of char-level ST is shown in the lower part of Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 318, |
|
"end": 326, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Char-Level Simultaneous Translation", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "Different from subword-level ST, given the parallel sentence pair < X, Y >, the source of the ST model in the proposed char-level ST is the character sequence c = (c 1 , \u2022 \u2022 \u2022 , c n ) after char-level tokenization, and the target is the subword sequence y = (y 1 , \u2022 \u2022 \u2022 , y m ) after word segmentation and BPE (Sennrich et al., 2016) , where n and m are the source and target sequence lengths respectively. The word segmentation and BPE operation at the target end are the same as subword-level MT (Vaswani et al., 2017) , and char-level tokenization is similar to character-level MT (Yang et al., 2016; Nikolov et al., 2018; Saunders et al., 2020) but not completely consistent. The char-level tokenization we proposed divides each source language character into a token, and other characters (such as numbers, other language characters) are still divided into a token according to complete words. An example of char-level tokenization is shown in Table 3 . In the result of char-level tokenization, each Chinese character is divided into a token, and the number (12) and English (UNIT) are entirely taken as a token, respectively. Char-level tokenization is more suitable for streaming transcription, which ensures that the newly input content at each step in streaming transcription is a complete token, and the input prefix does not change in any way. The robustness of char-level ST is greatly improved with the complete token and stable prefix.", |
|
"cite_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 334, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 499, |
|
"end": 521, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 604, |
|
"text": "(Yang et al., 2016;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 626, |
|
"text": "Nikolov et al., 2018;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 627, |
|
"end": 649, |
|
"text": "Saunders et al., 2020)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 950, |
|
"end": 957, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Char-Level Simultaneous Translation", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "Why char-level simultaneous translation? Motivating our use of char-level ST we consider three desiderata. 1) With the incremental source input, char-level ST is more robust since it avoids unstable prefixes caused by word segmentation, as shown in Table 1 . 2) Char-level ST can obtain a more fine-grained latency, because if one character is enough to express the meaning of a entire word, the ST system does not have to wait for the complete word before translating. 3) Char-level ST only performs char-level tokenization on the source, while the target still retains subword-level tokenization, so its translation performance will not be affected too much, as shown in Table 7 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 256, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 673, |
|
"end": 680, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Char-Level Simultaneous Translation", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "Input Sentence \u6b22\u8fce\u6765\u5230UNIT\u7cfb\u7edf\u7684\u7b2c12\u671f\u9ad8\u7ea7\u8bfe\u7a0b\u3002 Output Sentence welcome to the 12th advanced course on UNIT system . subword-level MT", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Char-Level Simultaneous Translation", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "\u6b22\u8fce / \u6765\u5230 / UN@@ / IT / \u7cfb\u7edf / \u7684 / \u7b2c / 12@@ / \u671f / \u9ad8\u7ea7 / \u8bfe\u7a0b / \u3002 character-level MT \u6b22 / \u8fce / \u6765 / \u5230 / U / N / I / T / \u7cfb / \u7edf / \u7684 / \u7b2c / 1 / 2 / \u671f / \u9ad8 / \u7ea7 / \u8bfe / \u7a0b / \u3002 S. char-level tokenization \u6b22 / \u8fce / \u6765 / \u5230 / UNIT / \u7cfb / \u7edf / \u7684 / \u7b2c / 12 / \u671f / \u9ad8 / \u7ea7 / \u8bfe / \u7a0b / \u3002 T. subword-level MT", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Char-Level Simultaneous Translation", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "welcome / to / the / 12@@ / th / advanced / course / on / UNIT / system / . Table 3 : An example of tokenization method applied by the char-level wait-k policy. For the source, we use char-level tokenization, which separates each source language character into separate segments, and divides the others by words. For the target, we apply the same operation as the conventional subword-level MT. The sentences marked in red are the source and target of our proposed ST model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 83, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Char-Level Simultaneous Translation", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "For the read / write policy, we apply the wait-k policy on the proposed char-level ST. The difference between char-level wait-k policy and standard waitk policy is that each token in standard wait-k policy is a subword, while each token in char-level wait-k policy is a character (other languages or Numbers are still words), as shown in Figure 1 . We rewrite g (t) in Eq. 1into g k (t) for charlevel wait-k policy, which represents the number of source tokens (Character) read in when outputting the target token y t , calculated as:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 346, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Read / Write Policy", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "g k (t) = min {k + t \u2212 1, |c|} , t = 1, 2, \u2022 \u2022 \u2022 (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Read / Write Policy", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "where c is the input character sequence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Read / Write Policy", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "Another significant advantage of the standard wait-k policy is that it can obtain some implicit prediction ability in training, and char-level wait-k policy further strengthens the prediction ability and improves the stability of prediction. The reason is that the granularity of the char-level is smaller so that the prediction of char-level is simpler and more accurate than that of subword-level. As shown in Figure 1 , it is much simpler and more accurate to predict \"\u7cfb\u7edf\" given \"\u7cfb\", since there are few possible characters that can be followed by \"\u7cfb\".", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 412, |
|
"end": 420, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Read / Write Policy", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "To improve the quality of domain adaptation, we apply some modifications to all training corpus, including general domain and spoken language domain, to make them more closer to streaming transcription. Besides, we also augment the spoken language corpus to make up for the lack of data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Domain Adaptation", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For training corpus, including general domain and spoken language domain, the most serious difference from streaming transcription is that each Original \u5404\u4f4d\u5f00\u53d1\u8005\u3001\u5404\u4f4d\u670b\u53cb \u4eec\uff0c\u5927\u5bb6\u4e0b\u5348\u597d\uff01 Depunctuation \u5404\u4f4d\u5f00\u53d1\u8005\u3001\u5404\u4f4d\u670b\u53cb \u4eec\uff0c\u5927\u5bb6\u4e0b\u5348\u597d Table 4 : An example of depunctuation operation, where the ending punctuation of the source sentence is deleted.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 206, |
|
"end": 213, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Depunctuation", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "sentence in streaming transcription usually lacks ending punctuation, as shown in Table 2 . Since the punctuation in the training corpus is complete, and the ending punctuation is often followed by < eos >, the model trained with them tends to wait for the source ending punctuation and then generate the corresponding target ending punctuation and < eos > to stop translating. As a result, given the unpunctuated input in streaming transcription, it is difficult for the model to generate target punctuation and < eos > to stop the translation. To this end, to strengthen the model's ability to translate punctuation from unpunctuated sentences, we delete the ending punctuation of the source sentence, and the target sentence remains unchanged, as shown in Table 4 . Note that our depunctuation operation is limited to the ending punctuation at the end of the source sentence ('\u3002','\uff01','\uff1f').", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 89, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 759, |
|
"end": 766, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Depunctuation", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "For the spoken language domain corpus, since the data size is too small, we perform data augmentation on the source sentence. For each source sentence, we perform 5 operations: add a comma, add a tone character, copy an adjacent character, replace a character with its homophone, or delete a character. Meanwhile, the target sentence remains unchanged. The proposed method improves the robustness of the model while augmenting the data. An example of data augmentation is shown in ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Augmentation", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "Our method is based on Transformer (Vaswani et al., 2017) , and the training is divided into two stages. First, we pre-train an ST model on the general domain MT corpus, and then fine-tune the ST model on the spoken language domain corpus. For pre-training, we apply multi-path (Elbayad et al., 2020) and future-guided (Zhang et al., 2020b) , to enhance the predict ability and avoid the huge consumption caused by training different models for each k. For fine-tuning, we apply the original prefix-to-prefix framework (Ma et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 57, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 300, |
|
"text": "(Elbayad et al., 2020)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 340, |
|
"text": "(Zhang et al., 2020b)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 536, |
|
"text": "(Ma et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Methods", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "To improve the predictive ability of the ST model, we apply the future-guided training proposed by (Zhang et al., 2020b) . Besides the incremental Transformer for simultaneous translation with charlevel wait-k policy, we introduce a full-sentence Transformer, used as the teacher of the incremental Transformer for ST through knowledge distillation. The full-sentence Transformer is trained with crossentropy loss:", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 120, |
|
"text": "(Zhang et al., 2020b)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L (\u03b8 f ull ) = \u2212 (c,y)\u2208Dg |y| t=1 log p \u03b8 f ull (y t | y <t , c)", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "where \u03b8 f ull is the parameter of full-sentence Transformer, D g is the general domain corpus. For the incremental Transformer for ST, since it applies char-level wait-k policy, the source tokens participating in translating are limited to less than g k (t) when decoding the t th target token. For each k, the decoding probability is calculated as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "p (y | c, k) = |y| t=1 p \u03b8 incr y t | y <t , c \u2264g k (t) (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "where c and y are the input character sequence and the output subword sequence, respectively. c \u2264g k (t) represents the first g k (t) tokens of c. \u03b8 incr is the parameter of incremental Transformer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "Following Elbayad et al. (2020) , to cover all possible k during training, we apply multi-path training. k is not fixed during training, but randomly and uniformly sampled from K, where", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 31, |
|
"text": "Elbayad et al. (2020)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "K = [1, \u2022 \u2022 \u2022 , |c|]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "is the set of all possible values of k. Incremental Transformer is also trained with cross-entropy loss:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "L (\u03b8 incr ) = \u2212 (c,y)\u2208Dg |y| t=1,k\u223cU (K) log p \u03b8 incr y t | y <t , c \u2264g k (t) (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "For the knowledge distillation between fullsentence Transformer and incremental Transformer, we apply L 2 regularization term between their encoder hidden states, calculated as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "L z incr , z f ull = 1 |c| |c| i=1 z incr i \u2212 z f ull i 2 (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "where z incr and z f ull represent the hidden states of incremental Transformer and full-sentence Transformer, respectively. Finally, the total loss L is calculated as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "L = L (\u03b8 incr ) + L (\u03b8 f ull ) + \u03bbL z incr , z f ull (7)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "where \u03bb is the hyper-parameter, and we set \u03bb = 0.1 in our system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "After pre-training an ST model, we use spoken language domain corpus for fine-tuning. The spoken language domain corpus is a small dataset, and meanwhile most of the word order between the target and the source is the same, so we do not continue to use multi-path and future-guided training methods. We fix k and use the original prefix-to-prefix framework for training, and train different models for each k. Given k, the incremental Transformer is trained with cross-entropy loss:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-tuning", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L (\u03b8 incr , k) = \u2212 (c,y)\u2208Ds |y| t=1, log p \u03b8 incr y t | y <t , c \u2264g k (t)", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Fine-tuning", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "where D s is the spoken language domain corpus. Finally, for each k, we fine-tune an ST model. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-tuning", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "The dataset for Chinese \u2192 English task provided by the organizer contains three parts, shown in Table 6. CWMT19 2 is the general domain corpus that consists of 9,023,708 sentence pairs. Transcription consists of 37,901 sentence pairs and Dev. Set consists of 956 sentence pairs 3 , which are both spoken language domain corpus collected from real speeches (Zhang et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 356, |
|
"end": 376, |
|
"text": "(Zhang et al., 2021)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We use CWMT19 to pre-train the ST model, then use Transcription for fine-tuning, and finally evaluate the latency and translation quality of our system on Dev. Set. Note that we use the streaming transcription provided by the organizer for testing. Streaming transcription consists of 23,836 lines, which are composed by breaking each sentence in Dev. Set into lines whose length is incremented by one word until the end of the sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We eliminate the corpus with a huge ratio in length between source and target from CWMT19, and finally get 8,646,245 pairs of clean corpus. We augment the Transcription data according to the method in Sec.4.2.2, and get 227,406 sentence pairs. Meanwhile, for both CWMT19 and Transcription, we remove the ending punctuation according to the method in Sec.4.2.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Given the processed corpus after cleaning and augmentation, we first perform char-level tokenization (Sec.4.1) on the Chinese sentences, and tokenize and lowercase English sentences with the Moses 4 . We apply BPE (Sennrich et al., 2016) with 16K merge operations on English.", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 237, |
|
"text": "(Sennrich et al., 2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We set the standard wait-k policy as the baseline and compare our method with it. We conducted experiments on the following systems: Table 7 : Results of offline model. '+FT': +fine-tuning.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 140, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System Setting", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Offline: offline model, full-sentence MT based on Transformer. We report the results of the subword-level / char-level offline model with greedy / beam search respectively in Table 7 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 182, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System Setting", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Standard Wait-k: standard subword-level waitk policy proposed by Ma et al. (2019) , used as our baseline. For comparison, we apply the same training method as our method (Sec.4.3) to train it.", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 81, |
|
"text": "Ma et al. (2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Setting", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Standard Wait-k + rm Last Token: standard subword-level wait-k policy. In the inference time, the last token after the word segmentation is removed to prevent it from being incomplete.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Setting", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Char-Level Wait-k: our proposed method, refer to Sec.4 for details.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Setting", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The implementation of all systems is based on Transformer-Big, and adapted from Fairseq Library (Ott et al., 2019) . The parameters are the same as the original Transformer (Vaswani et al., 2017) . All systems are trained on 4 RTX-3090 GPUs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 114, |
|
"text": "(Ott et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 195, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Setting", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "For evaluation metric, we use BLEU 5 (Papineni et al., 2002) and AL 6 (Ma et al., 2019) to measure translation quality and latency, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 60, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 70, |
|
"end": 87, |
|
"text": "(Ma et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metric", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Latency metric AL of char-level wait-k policy is calculated with g k (t) in Eq. 2:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metric", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "AL = 1 \u03c4 \u03c4 t=1 g k (t) \u2212 t \u2212 1 |y| |c| (9)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metric", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "where \u03c4 = argmax", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metric", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "t (g k (t) = |c|)", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Evaluation Metric", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "where c and y are the input character sequence and the output subword sequence, respectively. Note that since the streaming transcription provided by the organizer adds a source character at each step, for all systems, we use character-level AL to evaluate the latency. showing the results of proposed char-level wait-k, standard wait-k, standard wait-k+rm last token and offline model with greedy/beam search.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metric", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We compared the performance of our proposed char-level wait-k policy and subword-level wait-k policy, and set k = 1, 2, . . . , 15 to draw the curve of translation quality against latency, as shown in Figure 2 . Note that the same value of k for charlevel wait-k policy and subword-level wait-k policy does not mean that the latency of the two are similar, because lagging k tokens in char-level wait-k means strictly waiting for k characters, while for subword-level wait-k, it waits for k subwords, which contain more characters. 'Char-Level Wait-k' outperforms 'Standard Wait-k' and 'Standard Wait-k+rm Last Token' at all latency, and improves about 6 BLEU at low latency (AL=1.10). Besides, char-level wait-k performs more stably and robustly than standard wait-k when dealing with streaming transcription input, because char-level wait-k has a stable prefix while the prefix of standard wait-k may change between adjacent steps due to the different word segmentation results. 'Standard Wait-k+rm Last Token' solves the issue that the last token may be incomplete, so that the translation quality is higher than Standard Wait-k under the same k, which improves about 0.56 BLEU (average on all k). However, 'Standard Wait-k+rm Last Token' increases the latency. Compared with 'Standard Wait-k', it waits for one more token on average under the same k. Therefore, from the overall curve, the improvement of 'Standard Wait-k+rm Last Token' is limited.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 209, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main Result", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Char-level wait-k is particularly outstanding at low latency, and it achieves good translation quality even when the AL is less than 0. It is worth mentioning that the reason why the AL is less than 0 is that the generated translation is shorter and |y| |c| in Eq.(9) is greater than 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Main Result", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "To analyze the effect of data processing, including 'Depunctuation' and 'Data Augmentation', we show the results without them in Figure 3 . We notice that data augmentation improves the translation quality of the model by 1.61 BLEU (average on all k), and the model becomes more stable and robust. 'Depunctuation' is even more important. If we keep the ending punctuation in the training corpus, the translation quality of the model drops by 2.27 BLEU, and the latency increases by 2.83 (average on all k). This is because streaming transcription input has no ending punctuation, which makes the model hard to generate target ending punctuation and tend to translate longer translations since it is difficult to generate < eos > without target ending punctuation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 137, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Data Processing", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "To enhance the performance and robustness under low latency, we combine future-guided and multipath training methods in pre-training. To verify the effectiveness of the two training methods, we conducted an ablation study on them, and show the results of removing one of them in Figure 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 279, |
|
"end": 287, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study on Training Methods", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "When removing one of them, the translation quality decreases, especially at low latency. When the 'Future-guided' is removed, the translation quality decreases by 1.49 BLEU (average on all k), and when the 'Multi-path' is removed, the translation quality decreases by 0.76 BLEU (average on all k). This shows that two training methods can both effectively improve the translation quality under low latency, especially 'Future-guided'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Study on Training Methods", |
|
"sec_num": "5.6" |
|
}, |
|
{ |
|
"text": "Previous ST methods are mainly divided into precise read / write policy and stronger predictive ability.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For read / write policy, early policies used segmented translation, and applied full sentence translation to each segment (Bangalore et al., 2012; Cho and Esipova, 2016; . Gu et al. (2017) trained an agent through reinforcement learning to decide read / write. Dalvi et al. (2018) proposed STATIC-RW, which first performing S's READs, then alternately performing RW 's WRITEs and READs. Ma et al. (2019) proposed wait-k policy, wherein first reads k tokens and then begin synchronizing write and read. Wait-k policy has achieved remarkable performance because it is easy to train and stable, and is widely used in simultaneous translation. Zheng et al. (2019a) generated the gold read / write sequence of input sentence by rules, and then trained an agent with the input sentences and gold read / write sequence. Zheng et al. (2019b) introduces a \"delay\" token {\u03b5} into the target vocabulary to read one more token. Arivazhagan et al. (2019) proposed MILK, which uses a Bernoulli distribution variable to determine whether to output. Ma et al. (2020) proposed MMA, the implementation of MILK based on Transformer. Zheng et al. (2020) proposed a decoding policy that uses multiple fixed models to accomplish adaptive decoding. Zhang et al. (2020a) propose a novel adaptive segmentation policy for ST.", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 146, |
|
"text": "(Bangalore et al., 2012;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 169, |
|
"text": "Cho and Esipova, 2016;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 188, |
|
"text": "Gu et al. (2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 280, |
|
"text": "Dalvi et al. (2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 387, |
|
"end": 403, |
|
"text": "Ma et al. (2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 640, |
|
"end": 660, |
|
"text": "Zheng et al. (2019a)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 813, |
|
"end": 833, |
|
"text": "Zheng et al. (2019b)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 916, |
|
"end": 941, |
|
"text": "Arivazhagan et al. (2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1034, |
|
"end": 1050, |
|
"text": "Ma et al. (2020)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1114, |
|
"end": 1133, |
|
"text": "Zheng et al. (2020)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1226, |
|
"end": 1246, |
|
"text": "Zhang et al. (2020a)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For predicting future, Matsubara et al. (2000) applied pattern recognition to predict verbs in advance. Grissom II et al. (2014) used a Markov chain to predict the next word and final verb. (Oda et al., 2015) predict unseen syntactic constituents to help generate complete parse trees and perform syntax-based simultaneous translation. added a Predict operation to the agent based on Gu et al. (2017) , predicting the next word as an additional input. Elbayad et al. (2020) enhances the wait-k policy by sampling different k to train. Zhang et al. (2020b) proposed future-guided training, which introduces a full-sentence Transformer as the teacher of the ST model and uses future information to guide training through knowledge distillation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 46, |
|
"text": "Matsubara et al. (2000)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 104, |
|
"end": 128, |
|
"text": "Grissom II et al. (2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 208, |
|
"text": "(Oda et al., 2015)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 384, |
|
"end": 400, |
|
"text": "Gu et al. (2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 473, |
|
"text": "Elbayad et al. (2020)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 535, |
|
"end": 555, |
|
"text": "Zhang et al. (2020b)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Although the previous methods performed well, they were all evaluated on the traditional MT corpus instead of the real streaming spoken language corpus. Therefore, the previous methods all ignore the robustness and domain adaptation of the ST model in the face of real streaming input. Our method bridges the gap between the MT corpus and the streaming spoken language domain input, and is more robust and adaptable to the spoken language domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Our submitted system won the first place in AutoSimTrans 2021, which is described in this paper. For streaming transcription input from the real scenarios, our proposed char-level wait-k policy is more robust than standard subword-level wait-k. Besides, we also propose two data processing operations to improve the spoken language domain adaptability. For training, we combine two existing training methods that have been proven effective. The experiment on the data provided by the organizer proves the superiority of our method, especially at low latency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In this competition, we implemented the char-level wait-k policy on the Chinese source. For some language pairs with a large length ratio between the source (char) and the target (bpe), we can read multiple characters at each step to prevent the issue caused by the excessively long char-level source. We put the char-level simultaneous translation on other languages (such as German and English) for both fixed and adaptive policy into our future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "casia2015, casict2011, casict2015, datum2015, da-tum2017 and neu2017. http://mteval.cipsc.org. cn:81/agreement/AutoSimTrans 3 https://dataset-bj.cdn.bcebos.com/ qianyan%2FAST_Challenge.zip 4 http://www.statmt.org/moses/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The script for calculating BLEU is provided by the organizer from https://dataset-bj.cdn.bcebos. com/qianyan%2FAST_Challenge.zip.6 The calculation of AL is as https://github.com/ autosimtrans/SimulTransBaseline/blob/ master/latency.py.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank all the anonymous reviewers for their insightful and valuable comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Prediction improves simultaneous neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Ashkan", |
|
"middle": [], |
|
"last": "Alinejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maryam", |
|
"middle": [], |
|
"last": "Siahbani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Sarkar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3022--3027", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1337" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashkan Alinejad, Maryam Siahbani, and Anoop Sarkar. 2018. Prediction improves simultaneous neu- ral machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3022-3027, Brussels, Belgium. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Monotonic Infinite Lookback Attention for Simultaneous Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Naveen", |
|
"middle": [], |
|
"last": "Arivazhagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chung-Cheng", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Semih", |
|
"middle": [], |
|
"last": "Yavuz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruoming", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1313--1323", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/p19-1126" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naveen Arivazhagan, Colin Cherry, Wolfgang Macherey, Chung-cheng Chiu, Semih Yavuz, Ruoming Pang, Wei Li, and Colin Raffel. 2019. Monotonic Infinite Lookback Attention for Simulta- neous Machine Translation. pages 1313-1323.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Real-time incremental speech-tospeech translation of dialogs", |
|
"authors": [ |
|
{ |
|
"first": "Srinivas", |
|
"middle": [], |
|
"last": "Bangalore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Kumar Rangarajan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prakash", |
|
"middle": [], |
|
"last": "Sridhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ladan", |
|
"middle": [], |
|
"last": "Kolan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aura", |
|
"middle": [], |
|
"last": "Golipour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jimenez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "437--445", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Srinivas Bangalore, Vivek Kumar Rangarajan Srid- har, Prakash Kolan, Ladan Golipour, and Aura Jimenez. 2012. Real-time incremental speech-to- speech translation of dialogs. In Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 437-445, Montr\u00e9al, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Revisiting Character-Based Neural Machine Translation with Capacity and Compression", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Bapna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orhan", |
|
"middle": [], |
|
"last": "Firat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4295--4305", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1461" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Cherry, George Foster, Ankur Bapna, Orhan Firat, and Wolfgang Macherey. 2018. Revisiting Character-Based Neural Machine Translation with Capacity and Compression. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4295-4305, Brussels, Belgium. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Can neural machine translation do simultaneous translation?", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masha", |
|
"middle": [], |
|
"last": "Esipova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho and Masha Esipova. 2016. Can neural machine translation do simultaneous translation?", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Incremental decoding and training methods for simultaneous translation in neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Fahim", |
|
"middle": [], |
|
"last": "Dalvi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadir", |
|
"middle": [], |
|
"last": "Durrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hassan", |
|
"middle": [], |
|
"last": "Sajjad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "493--499", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2079" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fahim Dalvi, Nadir Durrani, Hassan Sajjad, and Stephan Vogel. 2018. Incremental decoding and training methods for simultaneous translation in neu- ral machine translation. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 493-499, New Orleans, Louisiana. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Efficient wait-k models for simultaneous machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Maha", |
|
"middle": [], |
|
"last": "Elbayad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Besacier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Verbeek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maha Elbayad, Laurent Besacier, and Jakob Verbeek. 2020. Efficient wait-k models for simultaneous ma- chine translation.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Character-Level Translation with Self-attention", |
|
"authors": [ |
|
{ |
|
"first": "Yingqiang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Nikola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhuang", |
|
"middle": [], |
|
"last": "Nikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"H R" |
|
], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hahnloser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1591--1604", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.145" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yingqiang Gao, Nikola I. Nikolov, Yuhuang Hu, and Richard H.R. Hahnloser. 2020. Character-Level Translation with Self-attention. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1591-1604, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Don't until the final verb wait: Reinforcement learning for simultaneous machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Alvin", |
|
"middle": [], |
|
"last": "Grissom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "He", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Boyd-Graber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Morgan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iii", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1342--1352", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1140" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alvin Grissom II, He He, Jordan Boyd-Graber, John Morgan, and Hal Daum\u00e9 III. 2014. Don't until the final verb wait: Reinforcement learning for simul- taneous machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1342-1352, Doha, Qatar. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Learning to translate in real-time with neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Victor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1053--1062", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Graham Neubig, Kyunghyun Cho, and Vic- tor O.K. Li. 2017. Learning to translate in real-time with neural machine translation. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers, pages 1053-1062, Valencia, Spain. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Fully Character-Level Neural Machine Translation without Explicit Segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Hofmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "365--378", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00067" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Lee, Kyunghyun Cho, and Thomas Hofmann. 2017. Fully Character-Level Neural Machine Trans- lation without Explicit Segmentation. Transactions of the Association for Computational Linguistics, 5:365-378.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "BIT's system for the AutoSimTrans 2020",
|
"authors": [ |
|
{ |
|
"first": "Minqin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haodong", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuanjie", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sijia", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liting", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuhang", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the First Workshop on Automatic Simultaneous Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "37--44", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.autosimtrans-1.6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minqin Li, Haodong Cheng, Yuanjie Wang, Sijia Zhang, Liting Wu, and Yuhang Guo. 2020. BIT's system for the AutoSimTrans 2020. In Proceedings of the First Workshop on Automatic Simultaneous Translation, pages 37-44, Seattle, Washington. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Character-based Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Wang", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabel", |
|
"middle": [], |
|
"last": "Trancoso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Black", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1511.04586" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wang Ling, Isabel Trancoso, Chris Dyer, and Alan W. Black. 2015. Character-based Neural Machine Translation. arXiv:1511.04586 [cs].", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "STACL: Simultaneous translation with implicit anticipation and controllable latency using prefix-to-prefix framework", |
|
"authors": [ |
|
{ |
|
"first": "Mingbo", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renjie", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaibo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baigong", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuanqiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongjun", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hairong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3025--3036", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1289" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mingbo Ma, Liang Huang, Hao Xiong, Renjie Zheng, Kaibo Liu, Baigong Zheng, Chuanqiang Zhang, Zhongjun He, Hairong Liu, Xing Li, Hua Wu, and Haifeng Wang. 2019. STACL: Simultaneous translation with implicit anticipation and control- lable latency using prefix-to-prefix framework. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3025-3036, Florence, Italy. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Monotonic multihead attention",
|
"authors": [ |
|
{ |
|
"first": "Xutai", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"Miguel" |
|
], |
|
"last": "Pino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Cross", |
|
"suffix": "" |
|
},

{

"first": "Liezl",

"middle": [],

"last": "Puzon",

"suffix": ""

},

{

"first": "Jiatao",

"middle": [],

"last": "Gu",

"suffix": ""

}

],

"year": 2020,
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xutai Ma, Juan Miguel Pino, James Cross, Liezl Pu- zon, and Jiatao Gu. 2020. Monotonic multihead attention. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Simultaneous japenese-english interpretation based on early predictoin of english verb", |
|
"authors": [ |
|
{ |
|
"first": "Keiichi", |
|
"middle": [], |
|
"last": "Matsubara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shigeki", |
|
"middle": [], |
|
"last": "Iwashima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nobuo", |
|
"middle": [], |
|
"last": "Kawaguchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katsuhiko", |
|
"middle": [], |
|
"last": "Toyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yasuyoshi", |
|
"middle": [], |
|
"last": "Inagaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 4th Symposium on Natural Languauge Processing(SNLP-2000)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "268--273", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keiichi Matsubara, Shigeki Iwashima, Nobuo Kawaguchi, Katsuhiko Toyama, and Yasuyoshi Inagaki. 2000. Simultaneous japenese-english interpretation based on early predictoin of english verb. In Proceedings of the 4th Symposium on Natural Languauge Processing(SNLP-2000), pages 268-273.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Character-level Chinese-English Translation through ASCII Encoding", |
|
"authors": [ |
|
{

"first": "Nikola",

"middle": [

"I"

],

"last": "Nikolov",

"suffix": ""

},

{

"first": "Yuhuang",

"middle": [],

"last": "Hu",

"suffix": ""

},

{

"first": "Mi",

"middle": [

"Xue"

],

"last": "Tan",

"suffix": ""

},

{

"first": "Richard",

"middle": [

"H R"

],

"last": "Hahnloser",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10--16", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6302" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikola I. Nikolov, Yuhuang Hu, Mi Xue Tan, and Richard H.R. Hahnloser. 2018. Character-level Chinese-English Translation through ASCII Encod- ing. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 10- 16, Brussels, Belgium. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Syntax-based simultaneous translation through prediction of unseen syntactic constituents", |
|
"authors": [ |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Oda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sakriani", |
|
"middle": [], |
|
"last": "Sakti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoki", |
|
"middle": [], |
|
"last": "Toda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Nakamura", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "198--207", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P15-1020" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yusuke Oda, Graham Neubig, Sakriani Sakti, Tomoki Toda, and Satoshi Nakamura. 2015. Syntax-based simultaneous translation through prediction of un- seen syntactic constituents. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 198-207, Beijing, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "fairseq: A fast, extensible toolkit for sequence modeling", |
|
"authors": [ |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexei", |
|
"middle": [], |
|
"last": "Baevski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "48--53", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-4009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations), pages 48-53, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for au- tomatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Improving Character-Based Decoding Using Target-Side Morphological Information for Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Peyman", |
|
"middle": [], |
|
"last": "Passban", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "58--68", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1006" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peyman Passban, Qun Liu, and Andy Way. 2018. Im- proving Character-Based Decoding Using Target- Side Morphological Information for Neural Ma- chine Translation. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 58-68, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Inference-only sub-character decomposition improves translation of unseen logographic characters", |
|
"authors": [ |
|
{ |
|
"first": "Danielle", |
|
"middle": [], |
|
"last": "Saunders", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weston", |
|
"middle": [], |
|
"last": "Feely", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Byrne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 7th Workshop on Asian Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "170--177", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danielle Saunders, Weston Feely, and Bill Byrne. 2020. Inference-only sub-character decomposition improves translation of unseen logographic charac- ters. In Proceedings of the 7th Workshop on Asian Translation, pages 170-177, Suzhou, China. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "How Grammatical is Characterlevel Neural Machine Translation? Assessing MT Quality with Contrastive Translation Pairs", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "376--382", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich. 2017. How Grammatical is Character- level Neural Machine Translation? Assessing MT Quality with Contrastive Translation Pairs. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 376-382, Valencia, Spain. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1715--1725", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Simultaneous translation using optimized segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Maryam", |
|
"middle": [], |
|
"last": "Siahbani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hassan", |
|
"middle": [], |
|
"last": "Shavarani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashkan", |
|
"middle": [], |
|
"last": "Alinejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Sarkar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 13th Conference of the Association for Machine Translation in the Americas", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "154--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maryam Siahbani, Hassan Shavarani, Ashkan Alinejad, and Anoop Sarkar. 2018. Simultaneous translation using optimized segmentation. In Proceedings of the 13th Conference of the Association for Machine Translation in the Americas (Volume 1: Research Papers), pages 154-167, Boston, MA. Association for Machine Translation in the Americas.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Understanding Pure Character-Based Neural Machine Translation: The Case of Translating Finnish into English", |
|
"authors": [ |
|
{ |
|
"first": "Gongbo", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4251--4262", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gongbo Tang, Rico Sennrich, and Joakim Nivre. 2020. Understanding Pure Character-Based Neu- ral Machine Translation: The Case of Translat- ing Finnish into English. In Proceedings of the 28th International Conference on Computational Linguistics, pages 4251-4262, Barcelona, Spain (Online). International Committee on Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{

"first": "\u0141ukasz",

"middle": [],

"last": "Kaiser",

"suffix": ""

},

{

"first": "Illia",

"middle": [],

"last": "Polosukhin",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Ben- gio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems 30, pages 5998-6008. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "A Character-Aware Encoder for Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3063--3070", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhen Yang, Wei Chen, Feng Wang, and Bo Xu. 2016. A Character-Aware Encoder for Neural Machine Translation. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 3063-3070, Osaka, Japan. The COLING 2016 Organizing Com- mittee.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Bstc: A large-scale chinese-english speech translation dataset", |
|
"authors": [ |
|
{ |
|
"first": "Ruiqing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiyang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuanqiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongjun", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ying", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qinfei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2104.03575" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruiqing Zhang, Xiyang Wang, Chuanqiang Zhang, Zhongjun He, Hua Wu, Zhi Li, Haifeng Wang, Ying Chen, and Qinfei Li. 2021. Bstc: A large-scale chinese-english speech translation dataset. arXiv preprint arXiv:2104.03575.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Learning adaptive segmentation policy for simultaneous translation", |
|
"authors": [ |
|
{ |
|
"first": "Ruiqing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuanqiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongjun", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2280--2289", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.178" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruiqing Zhang, Chuanqiang Zhang, Zhongjun He, Hua Wu, and Haifeng Wang. 2020a. Learning adaptive segmentation policy for simultaneous translation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2280-2289, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Future-guided incremental transformer for simultaneous translation", |
|
"authors": [ |
|
{ |
|
"first": "Shaolei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liangyou", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shaolei Zhang, Yang Feng, and Liangyou Li. 2020b. Future-guided incremental transformer for simulta- neous translation.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Simultaneous translation policies: From fixed to adaptive", |
|
"authors": [ |
|
{ |
|
"first": "Baigong", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaibo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renjie", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingbo", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hairong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2847--2853", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.254" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baigong Zheng, Kaibo Liu, Renjie Zheng, Mingbo Ma, Hairong Liu, and Liang Huang. 2020. Simul- taneous translation policies: From fixed to adaptive. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2847-2853, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Simpler and faster learning of adaptive policies for simultaneous translation", |
|
"authors": [ |
|
{ |
|
"first": "Baigong", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renjie", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingbo", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1349--1354", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1137" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baigong Zheng, Renjie Zheng, Mingbo Ma, and Liang Huang. 2019a. Simpler and faster learn- ing of adaptive policies for simultaneous transla- tion. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 1349-1354, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Simultaneous translation with flexible policy via restricted imitation learning", |
|
"authors": [ |
|
{ |
|
"first": "Baigong", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renjie", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingbo", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5816--5822", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1582" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baigong Zheng, Renjie Zheng, Mingbo Ma, and Liang Huang. 2019b. Simultaneous translation with flex- ible policy via restricted imitation learning. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5816-5822, Florence, Italy. Association for Compu- tational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"text": "Standard wait-k policy vs. our char-level wait-k policy (take k = 2 as an example).", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"text": "Translation quality (BLEU) against latency (AL) on Chinese \u2192 English simultaneous translation,", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF4": { |
|
"text": "Result of our method without depunctuation or data augmentation.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF6": { |
|
"text": "Ablation study on two training methods.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"text": "", |
|
"content": "<table><tr><td>: An example of streaming transcription output</td></tr><tr><td>track of the Chinese-English translation task.</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "", |
|
"content": "<table><tr><td>Original</td><td>1957\u5e74\u6211\u5230\u5317\u4eac\u4e0a\u5927\u5b66</td></tr><tr><td>Add Comma</td><td>1957\u5e74\uff0c\u6211\u5230\u5317\u4eac\u4e0a\u5927\u5b66</td></tr><tr><td>Add Tone character</td><td>1957\u5e74\u6211\u554a\u5230\u5317\u4eac\u4e0a\u5927\u5b66</td></tr><tr><td>Copy Character</td><td>1957\u5e74\u6211\u5230\u5317\u5317\u4eac\u4e0a\u5927\u5b66</td></tr><tr><td>Replace Homophone</td><td>1957\u5e74\u6211\u5230\u5317\u7ecf\u4e0a\u5927\u5b66</td></tr><tr><td>Delete Character</td><td>1957\u5e74\u6211\u5230\u5317\u4eac\u4e0a\u5927\u5b66</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"text": "An example of data augmentation.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"text": "Statistics of Chinese \u2192 English datasets.", |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
} |
|
} |
|
} |
|
} |