|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:43:05.240471Z" |
|
}, |
|
"title": "Filtering Noisy Parallel Corpus using Transformers with Proxy Task Learning", |
|
"authors": [ |
|
{ |
|
"first": "Haluk", |
|
"middle": [], |
|
"last": "Acarcicek", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Huawei Technologies", |
|
"location": { |
|
"country": "Turkey" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Talha", |
|
"middle": [], |
|
"last": "Colakoglu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Huawei Technologies", |
|
"location": { |
|
"country": "Turkey" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Pinar", |
|
"middle": [ |
|
"Ece" |
|
], |
|
"last": "Aktan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Huawei Technologies", |
|
"location": { |
|
"country": "Turkey" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chongxuan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Huawei Technologies", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Huawei Technologies", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper illustrates Huawei's submission to the WMT20 low-resource parallel corpus filtering shared task. Our approach focuses on developing a proxy task learner on top of a transformer-based multilingual pre-trained language model to boost the filtering capability for noisy parallel corpora. Such a supervised task also helps us to iterate much more quickly than using an existing neural machine translation system to perform the same task. After performing empirical analyses of the finetuning task, we benchmark our approach by comparing the results with past years' state-of-theart records. This paper wraps up with a discussion of limitations and future work. The scripts for this study will be made publicly available. 1", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper illustrates Huawei's submission to the WMT20 low-resource parallel corpus filtering shared task. Our approach focuses on developing a proxy task learner on top of a transformer-based multilingual pre-trained language model to boost the filtering capability for noisy parallel corpora. Such a supervised task also helps us to iterate much more quickly than using an existing neural machine translation system to perform the same task. After performing empirical analyses of the finetuning task, we benchmark our approach by comparing the results with past years' state-of-theart records. This paper wraps up with a discussion of limitations and future work. The scripts for this study will be made publicly available. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Crawling web has been regarded as a de facto approach to produce bitexts, yet the crawled texts are under-qualified often in some aspects to train a proper machine translation system. Underqualified bitexts present misalignments, no alignments, wrong language pairs, sentences mostly composed of numbers and mathematical formulas, etc. Parallel corpus filtering in this manner holds a critical research area to improve the performance of machine translation systems. WMT organizes a shared task for parallel corpus filtering since 2018 intending to filter our noisy bitexts to this end. The challenge targets low-resource language pairs since 2019.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Many existing filtering methods require multiple layers of elimination by implementing manually engineered features such as length filtering, language identification, normalizing, etc. These hand-picked features work well for a language pair but don't generalize well to another language pair or domain and often bring algorithmic complexity to the overall system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The LASER (Artetxe and Schwenk, 2019 ) model achieved state-of-the-art (SOTA) records at the WMT19 shared task on low-resource parallel corpus filtering . The sentence representation model implemented in LASER provides a means for measuring the similarity between a source and a target sentence. As stated in the future work at Artetxe and Schwenk (2019) , there is still space to improve. Utilizing a self-attention mechanism remains future work as the LASER was not built upon the latest transformer architecture (Vaswani et al., 2017) . We are also interested in designing a filtering tool that can be efficiently applied to a wide range of language pairs. Pre-trained multilingual language models, such as BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) , are exploited to this end.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 36, |
|
"text": "(Artetxe and Schwenk, 2019", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 328, |
|
"end": 354, |
|
"text": "Artetxe and Schwenk (2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 515, |
|
"end": 537, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 715, |
|
"end": 736, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 749, |
|
"end": 767, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We make two contributions to the field in this manner. The first contribution is a proposal of approaching the filtering problem as a discrimination task that can be trained with a proxy task and synthetic training data generation (see in Section 3.1). The other contribution is the empirical knowledge learned from an analysis of the finetuning pre-trained multilingual language models on cross-lingual discrimination tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the WMT18 shared task, participants mostly used similar techniques in components as prefiltering, scoring the sentence pairs, and using a classifier for feature functions. Teams applied prefiltering rules to eliminate noisy data, including:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 short or lengthy sentences;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 sentence pairs with few words and unbalanced token lengths;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 sentence pairs with unmatched names, numbers, web addresses, etc.;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 sentences where a language identifier fails to identify a source or target language type.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Scoring functions were mostly used to correlate qualified texts. Participants also used sentence embeddings (Bouamor and Sajjad, 2018; Axelrod et al., 2011; Artetxe and Schwenk, 2019) altogether with a similarity function to detect the similarity of pairs. The WMT19 shared task focused on lowresource languages, namely Nepali-English and Sinhala-English. Participants mostly applied basic filtering techniques similar to those used in 2018. used sentence embeddings that were trained on parallel sentence pairs. Another approach was to train a machine translation system on the clean data and then used it to translate the non-English side to make a comparison. Several metrics were used to match sentence pairs such as METEOR, Levenshtein distance, and BLEU.", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 134, |
|
"text": "(Bouamor and Sajjad, 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 135, |
|
"end": 156, |
|
"text": "Axelrod et al., 2011;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 183, |
|
"text": "Artetxe and Schwenk, 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We found that our work relates to the submission from Bernier-Colborne and Lo (2019). However, their submission was unable to show the effectiveness of the proposed method due to potential issues in the pretraining process. Besides the parallel corpus filtering task, we come across several works utilizing a similar approach. In Yang et al. (2019) , BERT rescoring method is more effective at bitext mining than heuristic scoring methods, i.e., marginal cosine distance. In Gr\u00e9goire and Langlais (2018), a similar negative random sampling technique has been used for generating synthetic bad pairs. Also, attempts to create harder negative pairs were proven effective in bitext mining (Guo et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 330, |
|
"end": 348, |
|
"text": "Yang et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 686, |
|
"end": 704, |
|
"text": "(Guo et al., 2018)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Transformer models are currently state-of-the-art systems on most NLP classification and regression tasks. With the emergence of multilingual pretrained models, their cross-lingual capabilities can be exploited with little effort.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To treat this problem as a supervised one, we design a proxy learner to model this task. The correctly aligned pairs can be regarded as positive samples in a simple sense for binary classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proxy Task", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Most of the noise in the corpus originate from illaligned sentence pairs. The intuitive idea is to treat the misalignments as synthetic negative samples for our proxy task learner.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proxy Task", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Taking random samples of the target sentences for all source sentences was the easiest way to create negative samples. But this results in an easilyclassifiable training data which offers little assistance to the low-resource bitext filtering task. We need to create more valuable training data, which is referred to as harder examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proxy Task", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Instead of training transformers with easilydiscernible random negative samples, we need to create harder examples to confuse the model to boost its performance on the filtering task. We try the following ways to generate harder examples:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Harder Examples", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "Neighborhood Awareness The neighbor sentences in the corpus have a higher chance of sharing common semantics and topics than those randomly extracted from corpus-wide. Alignment slips are most likely to occur in this context. This concept of neighborhood awareness inspires us to generate harder training data. For every positive pair, we create two negative pairs by pairing adjacent sentences of that target sentence with the source sentence. Incorporating this simple strategy may help to boost filtering performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Harder Examples", |
|
"sec_num": "3.1.1" |
|
}, |
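
{

"text": "The following is a minimal sketch of the neighborhood-aware negative sampling described above, assuming the clean corpus is given as two aligned lists of source and target sentences; the function and variable names are illustrative and not taken from our released scripts.\n\ndef neighborhood_negatives(src_sents, tgt_sents):\n    # For every aligned (source, target) pair, emit one positive example\n    # and up to two hard negatives built from the target's adjacent sentences.\n    examples = []\n    for i, (src, tgt) in enumerate(zip(src_sents, tgt_sents)):\n        examples.append((src, tgt, 1))  # correctly aligned pair (positive)\n        for j in (i - 1, i + 1):  # neighboring target sentences\n            if 0 <= j < len(tgt_sents):\n                examples.append((src, tgt_sents[j], 0))  # misaligned pair (negative)\n    return examples",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Generating Harder Examples",

"sec_num": "3.1.1"

},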
|
{ |
|
"text": "Fuzzy String Matching Sampling Instead of randomly sampling negative examples from bitexts, we develop a new sampling strategy inspired by KNN (the k-nearest neighbors algorithm). To create harder examples for finetuning, we sampled lexically similar but semantically different sentences using a fuzzy string search method. 2 For each one of the source sentences (S), we perform a fuzzy search and identify the N similar sentence respecting to the fuzzy string score (F). We set a limit (L) on the F and ignore sentences with similarities over this limit (L) to avoid duplicated or highly related candidates. Then we pair the corresponded target sentences of those N similar sentences with the source sentences to create N negative pairs. We apply a setting with an L value of 60 (in a 100 scale) and N values of 2 and 3 to generate the validation and training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Harder Examples", |
|
"sec_num": "3.1.1" |
|
}, |
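
{

"text": "A minimal sketch of the fuzzy string matching sampling described above, using the fuzzywuzzy package referenced in the footnote. The particular scorer (fuzz.ratio), the brute-force candidate loop, and the helper name are illustrative assumptions; L=60 and N=2 follow the setting given in the text.\n\nfrom fuzzywuzzy import fuzz\n\ndef fuzzy_negatives(src_sents, tgt_sents, n=2, limit=60):\n    # Pair each source sentence with the targets of its N most lexically similar\n    # (but not near-duplicate) source sentences to form hard negatives.\n    examples = []\n    for i, src in enumerate(src_sents):\n        scored = []\n        for j, cand in enumerate(src_sents):\n            if j == i:\n                continue\n            score = fuzz.ratio(src, cand)  # fuzzy string score F in [0, 100]\n            if score <= limit:  # ignore near-duplicates above the limit L\n                scored.append((score, j))\n        for _, j in sorted(scored, reverse=True)[:n]:\n            examples.append((src, tgt_sents[j], 0))\n    return examples",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Generating Harder Examples",

"sec_num": "3.1.1"

},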
|
{ |
|
"text": "Siamese Finetuning Bert-base-Multi-cased 0.62 0.69 Xlm-Roberta-Base 0.84 0.86 Xlm-Roberta-Large 0.88 0.92 Table 1 : Model performances on proxy task as accuracy in F1 scores.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 113, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We explore two candidate architecture in this study, one of which is a Siamese network (Reimers and Gurevych, 2019) . The other model is a pre-trained transformer with a binary classification learner to differentiate ok-aligned sentence pairs with their negative counterparts. A comparison between the performance of architecture can be seen in the Table 1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 115, |
|
"text": "(Reimers and Gurevych, 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "Sentence Transformers Reimers and Gurevych (2019) adopt a Siamese architecture, which allows us to feed sentence pairs separately to a transformer network like BERT. Each sentence pairs are encoded into fixed-size embeddings connected to a classifier network. Embeddings can be compared using a cosine similarity function at the inference stage. We reach on par performance to the LASER in the WMT19 parallel corpus filtering task (Table 3).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.1.2" |
|
}, |
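
{

"text": "As a rough illustration of how such Siamese embeddings are used at inference time, the sketch below scores pairs by cosine similarity with the sentence-transformers library; the checkpoint name is a placeholder rather than the exact model used in our experiments.\n\nimport numpy as np\nfrom sentence_transformers import SentenceTransformer\n\n# Placeholder multilingual checkpoint; our experiments finetune their own models.\nmodel = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')\n\ndef cosine_scores(src_sents, tgt_sents):\n    # Encode each side independently (Siamese setup) and compare the embeddings.\n    src_emb = model.encode(src_sents)\n    tgt_emb = model.encode(tgt_sents)\n    src_emb = src_emb / np.linalg.norm(src_emb, axis=1, keepdims=True)\n    tgt_emb = tgt_emb / np.linalg.norm(tgt_emb, axis=1, keepdims=True)\n    return (src_emb * tgt_emb).sum(axis=1)  # cosine similarity per pair",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture",

"sec_num": "3.1.2"

},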
|
{ |
|
"text": "Transformer Finetuning with Pair Classification BERT is a language model introduced by Devlin et al. (2018) . A pre-trained BERT model can be finetuned by adding an extra output layer to address many NLP tasks. One of BERT's derivatives is RoBERTa (Liu et al., 2019) , and it is essentially very similar to its successor in structure. The authors of RoBERTa discarded the next sentence prediction (NSP) task and altered the mask language modeling task. We compare multilingual variations of BERT and RoBERTa, which contains both Khmer (km) and Pashto (ps) monolingual data in the pretraining. The multilingual version of the RoBERTa, aka XLM-R (Conneau et al., 2019) , performs far superior as it leverages more data in training (Table 1) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 107, |
|
"text": "Devlin et al. (2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 248, |
|
"end": 266, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 644, |
|
"end": 666, |
|
"text": "(Conneau et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 729, |
|
"end": 738, |
|
"text": "(Table 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Architecture", |
|
"sec_num": "3.1.2" |
|
}, |
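
{

"text": "A minimal sketch of scoring a sentence pair with an XLM-R model finetuned for binary pair classification, using the Hugging Face transformers library; the finetuned checkpoint path is a hypothetical placeholder for a model trained on the synthetic positive/negative pairs described above.\n\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\n\ntokenizer = AutoTokenizer.from_pretrained('xlm-roberta-large')\n# Hypothetical path to an XLM-R checkpoint finetuned on the proxy task.\nmodel = AutoModelForSequenceClassification.from_pretrained('path/to/finetuned-xlmr', num_labels=2)\nmodel.eval()\n\ndef score_pair(src, tgt):\n    # Encode source and target together as one cross-lingual sequence pair.\n    inputs = tokenizer(src, tgt, truncation=True, return_tensors='pt')\n    with torch.no_grad():\n        logits = model(**inputs).logits\n    # The probability of the 'correctly aligned' class serves as the filtering score.\n    return torch.softmax(logits, dim=-1)[0, 1].item()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture",

"sec_num": "3.1.2"

},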
|
{ |
|
"text": "To observe the effect of the amount of the available parallel corpus on this proxy learner's performance, we try two different data regimes. The orange line in Figure 1 represents a very low resource setting, and we subsampled 2k parallel pairs to mimic that. The blue line represents a 10k subsampled version of the training data. As can be seen from Figure 1 , the more we increase the number of parallel sentences used in training the proxy task, the more performance we observe for the proxy task. Other than that, a system using almost as little as 2k parallel sentence pair is enough to beat the benchmark results. The proposed approach is promising for other low-resource domains and applications. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 168, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 360, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Amount of Parallel Data", |
|
"sec_num": "3.1.3" |
|
}, |
|
{ |
|
"text": "The amount of negative data that can be used in training is analyzed in the prior works (Section 2). Into our observations from Figure 1 and Figure 2, using larger negative ratios leads to better performances. However, it is better to keep the positive/negative ratio to 1 : 10 for our datasets with a presence of more parallel data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 136, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 147, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Negative Sampling Ratio", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "We oversample the positive pairs in the finetuning step to balance the positive-negative ratio. But it didn't make a noticeable change in proxy task performance or filtering performance. The immunity of the pre-trained transformer models to the class-imbalance up to 20x is very surprising.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Negative Sampling Ratio", |
|
"sec_num": "3.1.4" |
|
}, |
|
{ |
|
"text": "To prevent the catastrophic forgetting problem in the transformers, we apply a very small (2e \u22126 ) learning rate with the inverse root scheduler and a warmup step of 1, 000. We also try other learning rate schedulers like cyclic learning rate scheduler (CLR) from (Lee et al., 2020) but couldn't observe any benefit for this task. We suspect CLR may not apply to a finetuning process with a small epoch number (i.e., 2 epochs in this study).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Rate", |
|
"sec_num": "3.1.5" |
|
}, |
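
{

"text": "For illustration, the inverse square root schedule with 1,000 warmup steps and a 2e-6 peak learning rate can be expressed with PyTorch's LambdaLR as below; this is a sketch of the schedule shape under those assumptions, not our exact training code.\n\nimport torch\n\ndef inverse_sqrt_schedule(optimizer, warmup_steps=1000):\n    # Linear warmup to the peak learning rate, then decay proportional to 1/sqrt(step).\n    def lr_lambda(step):\n        if step < warmup_steps:\n            return (step + 1) / warmup_steps\n        return (warmup_steps / (step + 1)) ** 0.5\n    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)\n\n# Example usage with the peak learning rate used in our finetuning runs:\n# optimizer = torch.optim.AdamW(model.parameters(), lr=2e-6)\n# scheduler = inverse_sqrt_schedule(optimizer)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Learning Rate",

"sec_num": "3.1.5"

},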
|
{ |
|
"text": "We add a classification layer on top of XLM-R having 2, 048 hidden units with RELU activations and dropout. On single Nvidia V100 GPU, we finetune our models for 2 epochs without any early stopping. It takes about 6 hours to finetune on the generated datasets. The scoring step is just getting the probability of that pair being positive. Scoring a sentence pair takes 5ms on average.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Finetuning and Scoring", |
|
"sec_num": "3.1.6" |
|
}, |
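
{

"text": "A sketch of the classification head described above (2,048 hidden units with ReLU activations and dropout) placed on top of XLM-R's pooled representation; the encoder dimension (1,024 for XLM-R large) and the dropout rate are assumptions, as are the class and variable names.\n\nimport torch.nn as nn\n\nclass PairClassificationHead(nn.Module):\n    # Maps the pooled XLM-R representation to negative/positive pair logits.\n    def __init__(self, encoder_dim=1024, hidden_dim=2048, dropout=0.1):\n        super().__init__()\n        self.net = nn.Sequential(\n            nn.Dropout(dropout),\n            nn.Linear(encoder_dim, hidden_dim),\n            nn.ReLU(),\n            nn.Dropout(dropout),\n            nn.Linear(hidden_dim, 2),  # two classes: negative / positive pair\n        )\n\n    def forward(self, pooled_output):\n        return self.net(pooled_output)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Finetuning and Scoring",

"sec_num": "3.1.6"

},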
|
{ |
|
"text": "Bidirectional Scoring Similar to the bidirectional scoring in Chaudhary et al. 2019, we reverse source and target sentences and train two different networks, which produce two different scores (SRC-TRG and TRG-SRC) for a pair. We then combine these two scores under (min, mean, max) strategies. In the \"min\" strategy, we aim to filter false-positive pairs by keeping the lowest score from the (SRC-TRG and TRG-SRC) for each pair. In \"max\" strategy, we use the highest score for each pair. And in the \"mean\" strategy, an average of the scores are applied. We observe that filtering on the \"max\" score can turn some of the false-negative sentences into true-positives, which increases NMT performance (Table 2 ) . Ensembling We ensemble our top 3 trained transformer models under (min, max, mean) strategies and observe a minor improvement on the Pashto-English (ps-en) dataset. On the Khmer-English (km-en) dataset, there is no improvement (Table 3) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 699, |
|
"end": 709, |
|
"text": "(Table 2 )", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 939, |
|
"end": 948, |
|
"text": "(Table 3)", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Rescoring", |
|
"sec_num": "3.2" |
|
}, |
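
{

"text": "The (min, mean, max) combination of the SRC-TRG and TRG-SRC scores amounts to the small helper sketched below; the function and argument names are illustrative.\n\ndef combine_bidirectional(score_st, score_ts, strategy='max'):\n    # score_st: SRC-TRG model score; score_ts: TRG-SRC model score for the same pair.\n    if strategy == 'min':\n        return min(score_st, score_ts)  # conservative: filters false positives\n    if strategy == 'mean':\n        return (score_st + score_ts) / 2\n    return max(score_st, score_ts)  # 'max': recovers some false negatives",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Rescoring",

"sec_num": "3.2"

},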
|
{ |
|
"text": "Heuristic filters like overlap filters, length ratio, min-max length, and language identification are applied. For the Pashto-English setup, this step is not beneficial to the overall performance. For the Khmer-English setup, we observe a minor gain (Table 3 ). It appears that our scoring method can learn heuristic filtering on the fly without reliant on hard-coded heuristic filters.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 258, |
|
"text": "(Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Heuristic Filters", |
|
"sec_num": "3.3" |
|
}, |
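
{

"text": "For reference, a minimal sketch of the kind of heuristic pre-filters listed above; the thresholds and the optional language identification callable are illustrative placeholders rather than values tuned in our experiments.\n\ndef passes_heuristics(src, tgt, lang_id=None,\n                      min_tokens=1, max_tokens=150, max_len_ratio=3.0):\n    # Illustrative thresholds; real systems tune these per language pair.\n    s_toks, t_toks = src.split(), tgt.split()\n    if not (min_tokens <= len(s_toks) <= max_tokens):\n        return False\n    if not (min_tokens <= len(t_toks) <= max_tokens):\n        return False\n    ratio = max(len(s_toks), len(t_toks)) / max(1, min(len(s_toks), len(t_toks)))\n    if ratio > max_len_ratio:  # length-ratio filter\n        return False\n    if src.strip() == tgt.strip():  # overlap filter: both sides identical\n        return False\n    if lang_id is not None and not lang_id(src, tgt):  # language identification check\n        return False\n    return True",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Heuristic Filters",

"sec_num": "3.3"

},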
|
{ |
|
"text": "There is a relationship between F1 scores of the proxy task and the final NMT system performance (Figures 1-2) . Improvements of the final NMT in the proxy task peaks along with the negative sampling rate and decreases potentially due to overfitting. By looking at the same ratio presented in Figures 1-2 , we can conclude a correlation between the performance of the proxy task and that for the filtering task, showcasing the proposed approach's effectiveness. WMT20 Here we have presented our NMT performances of the submitted filtering systems in Table 3. Note that we measure all of the development cycles and improvements with the MBART finetuning . We do not replicate every experiment with training from scratch regime due to resource constraints. As shown in Table 3 , our method outperforms the LASER baseline without needing any prefiltering rules and costly marginal KNN scoring method in solving the hubness problem for both language settings.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 110, |
|
"text": "(Figures 1-2)", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 293, |
|
"end": 304, |
|
"text": "Figures 1-2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 767, |
|
"end": 774, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To find how our method generalizes across different filtering scenarios, we test it for the past generations of this shared task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Older Tasks", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We use the same neural machine translation system defined by the organizers. Our NMT model using the submission by Junczys-Dowmunt (2018) couldn't reach the reported scores (can be observed in Table 4 for the 10M subsampled set). Although our method couldn't match the SOTA results under these settings, it achieves a reasonable score. Note that we only used 10% of the available clean parallel data to accomplish this result. Also, instead of finetuning a multilingual pre-trained model, bilingual models can be tried to avoid the curse of multilinguality (Conneau et al., 2019) . WMT19 Our NMT model using the submission by couldn't reach the reported scores, as shown in Table 4 for the Nepalese-English (ne-en) set. The mismatches mentioned above with WMT18 and WMT19 are possibly due to a result of using multiple GPUs with distributed optimizers like stated in . In the low-resource setting, our method can surpass the SOTA results (Table 4) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 557, |
|
"end": 579, |
|
"text": "(Conneau et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 200, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 674, |
|
"end": 681, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 938, |
|
"end": 947, |
|
"text": "(Table 4)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "WMT18", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Task SOTA OURS WMT18 (de-en) *27.9 (28.62) 27.53 WMT19 (ne-en) *6.9 (7.1) 7.5 Table 4 : WMT18 and WMT19 filtering tasks test results. Note that numbers with \"*\" represent the submitted score performance under our NMT setup. Those in parenthesis are the reported scores.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 85, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "WMT18", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We illustrate our submission to the WMT20 lowresource parallel corpus filtering task. By developing a proxy task learner on top of a transform-based pre-trained language model XLM-R, We are able to improve the filtering capability for noisy data, achieving SOTA results. The parallel corpus filtering task is recalloriented. Therefore our model may not be suitable for high-precision jobs. The model has limitations in dealing with short sentences. It can be improved by finetuning on dictionaries or phase-based bitexts. The model performs better in low-resource and high-recall settings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In our experiments depicted in the subsection 3.1.6, we observe low performances several times. It may appear the model is suffering from the random seeds caused fragility mentioned in Risch and Krestel (2020) . A close look ascribes these abnormal results to the randomness in the sampling strategy. We leave this issue to future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 209, |
|
"text": "Risch and Krestel (2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Different kinds of synthetic noise generation techniques can be adapted to increase the robustness and accuracy of the model. For example in the filtered data we observed several false-positive cases which contains mis-translated numbers: en reference:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\"3) Sonar coverage: 45K at 200KHz\" ps to en translation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\"4) Sonar coverage: 90 at 125KHz\" Training an NMT model on this type of data hurts the translation performance. But this kind of noise can be fixed by altering the numerical values in the clean training data to sample negative pairs for our proxy task. Moreover, all the other synthetically generatable errors like a typo error, one to many alignment errors, etc. can be incorporated into the training data. But its not viable to model those kinds of errors independent from the language or domain with the naive assumptions and inventing heuristic rules. We believe further researches should focus on domain invariant noise generation techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://github.com/seatgeek/fuzzywuzzy", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to show our gratitude to colleagues from HTRDC AIE and AARC, Huawei for their support during this work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Massively multilingual sentence embeddings for zeroshot cross-lingual transfer and beyond", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "597--610", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe and Holger Schwenk. 2019. Mas- sively multilingual sentence embeddings for zero- shot cross-lingual transfer and beyond. Transac- tions of the Association for Computational Linguis- tics, 7:597-610.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Domain adaptation via pseudo in-domain data selection", |
|
"authors": [ |
|
{ |
|
"first": "Amittai", |
|
"middle": [], |
|
"last": "Axelrod", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "355--362", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amittai Axelrod, Xiaodong He, and Jianfeng Gao. 2011. Domain adaptation via pseudo in-domain data selection. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 355-362, Edinburgh, Scotland, UK. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "NRC parallel corpus filtering system for WMT 2019", |
|
"authors": [ |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Bernier-Colborne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chi-Kiu", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "252--260", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5434" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gabriel Bernier-Colborne and Chi-kiu Lo. 2019. NRC parallel corpus filtering system for WMT 2019. In Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 252-260, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "H2@ bucc18: Parallel sentence extraction from comparable corpora using multilingual sentence embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Houda", |
|
"middle": [], |
|
"last": "Bouamor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hassan", |
|
"middle": [], |
|
"last": "Sajjad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. Workshop on Building and Using Comparable Corpora", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Houda Bouamor and Hassan Sajjad. 2018. H2@ bucc18: Parallel sentence extraction from compara- ble corpora using multilingual sentence embeddings. In Proc. Workshop on Building and Using Compara- ble Corpora.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Lowresource corpus filtering using multilingual sentence embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuqing", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "261--266", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5435" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vishrav Chaudhary, Yuqing Tang, Francisco Guzm\u00e1n, Holger Schwenk, and Philipp Koehn. 2019. Low- resource corpus filtering using multilingual sentence embeddings. In Proceedings of the Fourth Confer- ence on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 261-266, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Unsupervised cross-lingual representation learning at scale", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartikay", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wenzek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.02116" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Extracting parallel sentences with bidirectional recurrent neural networks to improve machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Francis", |
|
"middle": [], |
|
"last": "Gr\u00e9goire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philippe", |
|
"middle": [], |
|
"last": "Langlais", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francis Gr\u00e9goire and Philippe Langlais. 2018. Ex- tracting parallel sentences with bidirectional recur- rent neural networks to improve machine translation. CoRR, abs/1806.05559.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Effective parallel corpus mining using bilingual sentence embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Mandy", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qinlan", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinfei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heming", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gustavo", |
|
"middle": [ |
|
"Hernandez" |
|
], |
|
"last": "Abrego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Stevens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Constant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Hsuan", |
|
"middle": [], |
|
"last": "Sung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Strope", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ray", |
|
"middle": [], |
|
"last": "Kurzweil", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "165--176", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6317" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandy Guo, Qinlan Shen, Yinfei Yang, Heming Ge, Daniel Cer, Gustavo Hernandez Abrego, Keith Stevens, Noah Constant, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil. 2018. Effective parallel corpus mining using bilingual sentence embeddings. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 165-176, Bel- gium, Brussels. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Dual conditional cross-entropy filtering of noisy parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Third Conference on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "901--908", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcin Junczys-Dowmunt. 2018. Dual conditional cross-entropy filtering of noisy parallel corpora. In Proceedings of the Third Conference on Machine Translation, pages 901-908, Belgium, Brussels. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Findings of the WMT 2019 shared task on parallel corpus filtering for low-resource conditions", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Pino", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "54--72", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-5404" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Francisco Guzm\u00e1n, Vishrav Chaud- hary, and Juan Pino. 2019. Findings of the WMT 2019 shared task on parallel corpus filtering for low-resource conditions. In Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 54-72, Flo- rence, Italy. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Applying cyclical learning rate to neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Cedric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cedric K. M. Lee, Jianfeng Liu, and Wei Peng. 2020. Applying cyclical learning rate to neural machine translation. ArXiv, abs/2004.02401.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Multilingual denoising pre-training for neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.08210" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation. arXiv preprint arXiv:2001.08210.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Sentence-BERT: Sentence embeddings using Siamese BERTnetworks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3982--3992", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1410" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- BERT: Sentence embeddings using Siamese BERT- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 3982-3992, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Bagging BERT models for robust aggression identification", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Risch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--61", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Risch and Ralf Krestel. 2020. Bagging BERT models for robust aggression identification. In Pro- ceedings of the Second Workshop on Trolling, Ag- gression and Cyberbullying, pages 55-61, Marseille, France. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Improving multilingual sentence embedding using bidirectional dual encoder with additive margin softmax", |
|
"authors": [ |
|
{ |
|
"first": "Yinfei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gustavo", |
|
"middle": [], |
|
"last": "Hern\u00e1ndez\u00e1brego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandy", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qinlan", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Hsuan", |
|
"middle": [], |
|
"last": "Sung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Strope", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ray", |
|
"middle": [], |
|
"last": "Kurzweil", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinfei Yang, Gustavo Hern\u00e1ndez\u00c1brego, Steve Yuan, Mandy Guo, Qinlan Shen, Daniel Cer, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil. 2019. Im- proving multilingual sentence embedding using bi- directional dual encoder with additive margin soft- max. CoRR, abs/1902.08564.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"text": "Proxy task validation performance in the face of changing volumes of training data (Pashto -English).", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"text": "MBART performance of the filtering model (Pashto -English).", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>: NMT results of systems trained after filter-</td></tr><tr><td>ing based on different bidirectional scoring strategies</td></tr><tr><td>(Pashto -English)</td></tr></table>", |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "NMT scores (BLEU) of the models that trained on a corpus filtered by the specified methods on WMT20 test sets. The bold fonts indicate the SOTA results. * indicates finetuning of the pretrained MBART model which is provided by the organizers.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |