|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:43:36.249989Z" |
|
}, |
|
"title": "PATQUEST: Papago Translation Quality Estimation", |
|
"authors": [ |
|
{ |
|
"first": "Yujin", |
|
"middle": [], |
|
"last": "Baek", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zae", |
|
"middle": [ |
|
"Myung" |
|
], |
|
"last": "Kim", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jihyung", |
|
"middle": [], |
|
"last": "Moon", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hyunjoong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Eunjeong", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Park", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes the system submitted by Papago team for the quality estimation task at WMT 2020. It proposes two key strategies for quality estimation: (1) task-specific pretraining scheme, and (2) task-specific data augmentation. The former focuses on devising learning signals for pretraining that are closely related to the downstream task. We also present data augmentation techniques that simulate the varying levels of errors that the downstream dataset may contain. Thus, our PATQUEST models are exposed to erroneous translations in both stages of task-specific pretraining and finetuning, effectively enhancing their generalization capability. Our submitted models achieve significant improvement over the baselines for Task 1 (Sentence-Level Direct Assessment; EN-DE only), and Task 3 (Document-Level Score). * Equal contribution \u2020 Work done during internship at Naver Corp.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes the system submitted by Papago team for the quality estimation task at WMT 2020. It proposes two key strategies for quality estimation: (1) task-specific pretraining scheme, and (2) task-specific data augmentation. The former focuses on devising learning signals for pretraining that are closely related to the downstream task. We also present data augmentation techniques that simulate the varying levels of errors that the downstream dataset may contain. Thus, our PATQUEST models are exposed to erroneous translations in both stages of task-specific pretraining and finetuning, effectively enhancing their generalization capability. Our submitted models achieve significant improvement over the baselines for Task 1 (Sentence-Level Direct Assessment; EN-DE only), and Task 3 (Document-Level Score). * Equal contribution \u2020 Work done during internship at Naver Corp.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "With the widespread use of machine translation systems, there is a growing need to evaluate translated results at low-cost. The task of quality estimation (QE) addresses this issue, where the quality of a translation is predicted automatically given the source sentence and its translation. The estimated quality can inform users about the reliability of the translation, or whether it needs to be post-edited.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous QE systems generally include pretraining and finetuning steps, where the former step involves masked language modeling (MLM) utilizing large parallel corpora, with the expectation that the models will learn cross-lingual relationships (Kepler et al., 2019; Kim et al., 2019) . The models are, in turn, finetuned with task-specific data. However, while the pretraining step involves training data with near-perfect translations, lowquality translations are only introduced during the finetuning step. In this work, we suggest two key strategies that could alleviate this pretrain-finetune discrepancy in QE tasks by: (1) adopting a task-specific pretraining objective which is close to that of the downstream task, and (2) generating abundant taskspecific erroneous sentence pairs and their learning signals. Our approach, which is depicted in Figure 1, is motivated from BLEURT (Sellam et al., 2020) , where we extend their general approach to the bilingual QE setting. Our submitted systems achieve significant improvements in performance over the baseline systems on WMT20 Shared Tasks for QE (Specia et al., 2020) : an absolute gain of +35.2% in Pearson score for (Task 1) Sentence-Level Direct Assessment (EN-DE), and +18.4% in Pearson score for (Task 3) Document-Level Score.", |
|
"cite_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 265, |
|
"text": "(Kepler et al., 2019;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 266, |
|
"end": 283, |
|
"text": "Kim et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 887, |
|
"end": 908, |
|
"text": "(Sellam et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1104, |
|
"end": 1125, |
|
"text": "(Specia et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 852, |
|
"end": 858, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Sentence-Level QE: Direct Assessment The task of sentence-level QE for direct assessment (DA) involves predicting the perceived quality of the translation given the source and the translated sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Following the footsteps of the previous work on QE, our sentence-level system also utilizes the pretrained multilingual language models such as BERT (Devlin et al., 2018) and Cross-lingual Language Model (XLM) (Conneau and Lample, 2019) . As the size of the training corpus for the QE task is very limited (7K sentence pairs), it is crucial to align these models closely to the task using more data in the form of task-specific pretraining. As opposed to pretraining the models on parallel corpora using the standard MLM approach, we pretrain the models in a multi-task setting using learning signals and data that are arguably more task-specific similar to Sellam et al. (2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 170, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 236, |
|
"text": "(Conneau and Lample, 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 658, |
|
"end": 678, |
|
"text": "Sellam et al. (2020)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to better align the pretrained models to the QE task, synthetic sentence pairs that contain various types of translation errors are generated from clean parallel corpora 1 . For each target sentence, we generate two perturbed sentences by separately applying one of the four methods described below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Data Augmentation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Omitted Word We randomly omit at most three words from the target-side, simulating inadequate translations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Data Augmentation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Word Order Based on the part-of-speech (POS) tag for each word in the target sentence, and predefined sequences of POS patterns, we randomly swap two target words if those words match one of the patterns. The POS patterns can be contiguous, e.g., adjective-space-noun, or long-ranged, e.g., noun-*-adjective. When none of the patterns are matched, we randomly swap two words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Data Augmentation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Lexical Selection For each target sentence, we mask out at most three words randomly, and apply mask-filling via a German BERT model from Hugging Face 2 . The purpose of this alteration is to generate fluent but somewhat inadequate target sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Data Augmentation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Repeated Phrase In order to simulate the repetition problem in translations generated by neural machine translation models, we alter the target sentence by adding a repetition of a random phrase within the sentence. The length of the random phrase is at most three tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Data Augmentation", |
|
"sec_num": "2.1" |
|
}, |
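
{

"text": "To make the four perturbations concrete, the following Python sketch shows simplified versions of them; it is an illustration of the procedure described above rather than the authors' released code, and the function names are ours. The mask-filling step assumes a Hugging Face fill-mask pipeline built from bert-base-german-cased (footnote 2).\n\nimport random\n\ndef omit_words(tokens, max_omit=3):\n    # Omitted Word: drop up to three random tokens to simulate inadequate translations.\n    n = random.randint(1, max(1, min(max_omit, len(tokens) - 1)))\n    drop = set(random.sample(range(len(tokens)), n))\n    return [t for i, t in enumerate(tokens) if i not in drop]\n\ndef swap_word_order(tokens):\n    # Word Order: fallback variant that swaps two random tokens (the paper first\n    # tries POS patterns such as adjective-noun before falling back to this).\n    i, j = random.sample(range(len(tokens)), 2)\n    out = tokens.copy()\n    out[i], out[j] = out[j], out[i]\n    return out\n\ndef repeat_phrase(tokens, max_len=3):\n    # Repeated Phrase: duplicate a random phrase of at most three tokens in place.\n    start = random.randrange(len(tokens))\n    end = min(start + random.randint(1, max_len), len(tokens))\n    return tokens[:end] + tokens[start:end] + tokens[end:]\n\ndef lexical_selection(sentence, fill_mask, max_masks=3):\n    # Lexical Selection: mask up to three tokens and refill them with a German BERT,\n    # e.g. fill_mask = pipeline('fill-mask', model='bert-base-german-cased').\n    tokens = sentence.split()\n    for i in random.sample(range(len(tokens)), min(max_masks, len(tokens))):\n        masked = tokens.copy()\n        masked[i] = fill_mask.tokenizer.mask_token\n        tokens[i] = fill_mask(' '.join(masked))[0]['token_str']\n    return ' '.join(tokens)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Task-Specific Data Augmentation",

"sec_num": "2.1"

},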
|
{ |
|
"text": "1 Europarl v10 and News Commentary v15 2 bert-base-german-cased, https://huggingface.co/transformers/ pretrained_models.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Data Augmentation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "As the goal of the downstream task is to predict the DA scores which represent the \"perceived quality\" of the translation, we need to consider pretraining signals that can capture the somewhat subjective notion of \"good\" and \"bad\" translations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Learning Signals", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Consulting the related works, we prepared the three learning signals:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Learning Signals", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 SentenceBERT score (Reimers and Gurevych, 2019) \u2022 BERTScore (Zhang et al., 2019) , extended to multilingual setting", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 49, |
|
"text": "(Reimers and Gurevych, 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 62, |
|
"end": 82, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Learning Signals", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 Target (German) Language Model (GPT-2, Radford et al. (2019) ) score", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 62, |
|
"text": "Radford et al. (2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Learning Signals", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "For each sentence pair in the original bilingual corpora as well as the augmented ones, the three types of learning signals are computed, and later used in the task-specific pretraining.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Learning Signals", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "For a given sentence, SentenceBERT produces a semantically meaningful sentence embedding that can be compared using a distance metric.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SentenceBERT Score", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "We note that when comparing the distance between two sentence vectors, the Kendall rank correlation coefficient (Kendall, 1938) is computed instead of the cosine similarity measure as the former correlates better with the human judgement, possibly because it produces a more widespread range of scores than the latter especially when the dimension of the sentence vectors is high.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 127, |
|
"text": "(Kendall, 1938)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SentenceBERT Score", |
|
"sec_num": "2.2.1" |
|
}, |
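
{

"text": "As a concrete illustration of this choice, the snippet below scores a pair of sentence embeddings with Kendall's tau instead of cosine similarity; scipy treats the two vectors' coordinates as paired observations. This is a sketch of the scoring step only, with random vectors standing in for actual SentenceBERT outputs.\n\nimport numpy as np\nfrom scipy.stats import kendalltau\n\ndef sbert_signal(src_vec, tgt_vec):\n    # Compare two sentence embeddings coordinate-wise with Kendall's tau; per the\n    # paper this spreads scores out more than cosine when the dimension is high.\n    tau, _ = kendalltau(src_vec, tgt_vec)\n    return tau\n\n# Toy 512-dim embeddings standing in for distiluse-base-multilingual-cased output.\nrng = np.random.default_rng(0)\nsrc, tgt = rng.normal(size=512), rng.normal(size=512)\nprint(sbert_signal(src, tgt))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "SentenceBERT Score",

"sec_num": "2.2.1"

},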
|
{ |
|
"text": "In our experiments, we used the publicly available multilingual SentenceBERT model released from UKPLab 3 that supports 13 languages including English and German.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SentenceBERT Score", |
|
"sec_num": "2.2.1" |
|
}, |
|
{ |
|
"text": "While SentenceBERT score looks at the sentence embedding as a whole, BERTScore computes a similarity score for each token in the pair of sentences. We include BERTScore as one of the learning signals because we feared that the meanpooling of the BERT-embedded tokens within the SentenceBERT model, while effective in extracting the overall meaning of the sentence, may overlook some of the small semantic details within the sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual BERTScore", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "However, as the original BERTScore is designed to work in monolingual setting, i.e. evaluating a translation against a reference sentence, it needs to be extended in multilingual setting using a multilingual BERT (mBERT) model. Analogous to the original approach, the multilingual BERTScores can be computed in various ways depending on which side we are computing the maximum similarities from.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual BERTScore", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "In our experiments, we devise a metric where we merge both the source-and target-side maximum similarities between tokens with the corresponding inverse document frequency (IDF) weighting; thus, given a sequence of vectorized source and target tokens, s and t, we defined the mBERTScore of s and t to be:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual BERTScore", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "S s\u2192t + S t\u2192s s i \u2208s idf(s i ) + t j \u2208t idf(t j )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual BERTScore", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual BERTScore", |
|
"sec_num": "2.2.2" |
|
}, |
|
{ |
|
"text": "S s\u2192t = s i \u2208s idf(s i )max t j \u2208t s i \u22a4 t j S t\u2192s = t j \u2208t idf(t j )max s i \u2208s t j \u22a4 s i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual BERTScore", |
|
"sec_num": "2.2.2" |
|
}, |
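
{

"text": "A minimal NumPy sketch of this metric, assuming the token vectors have already been produced by mBERT and L2-normalized so that dot products are cosine similarities; the IDF weights would be estimated from the pretraining corpora.\n\nimport numpy as np\n\ndef mbert_score(S, T, idf_s, idf_t):\n    # S: (m, d) source token vectors; T: (k, d) target token vectors, L2-normalized.\n    # idf_s: (m,) and idf_t: (k,) per-token IDF weights.\n    sim = S @ T.T                                # pairwise cosine similarities\n    s_to_t = (idf_s * sim.max(axis=1)).sum()     # greedy match source -> target\n    t_to_s = (idf_t * sim.max(axis=0)).sum()     # greedy match target -> source\n    return (s_to_t + t_to_s) / (idf_s.sum() + idf_t.sum())",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Multilingual BERTScore",

"sec_num": "2.2.2"

},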
|
{ |
|
"text": "While SentenceBERT and multilingual BERTScore can be used as proxies for evaluating the \"adequacy\" of the translation, empirically, we noticed that they cannot seem to sufficiently represent the \"fluency\" of translated target sentence. In other words, both metrics may assign high scores to the translated sentence if key source tokens are translated and present in the translation, even when the overall sentence may not be articulate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Language Model Score", |
|
"sec_num": "2.2.3" |
|
}, |
|
{ |
|
"text": "To address this issue, the target language model (GPT-2) score is added to the set of learning signals. We simply use the arithmetic mean of the tokenlevel predictions to produce the score for a target sentence. We utilize the pretrained GPT-2 model for German released by Zamia Brain 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Language Model Score", |
|
"sec_num": "2.2.3" |
|
}, |
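
{

"text": "The sketch below shows one way to compute such a score with the transformers library: the mean token-level log-probability under a causal LM. The generic 'gpt2' checkpoint is a stand-in for the German GPT-2 from Zamia Brain (footnote 4), and averaging log-probabilities is our reading of \"arithmetic mean of the token-level predictions\".\n\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained('gpt2')  # stand-in for the German GPT-2\nmodel = AutoModelForCausalLM.from_pretrained('gpt2').eval()\n\ndef lm_score(sentence):\n    # Fluency signal: mean token-level log-probability of the sentence.\n    ids = tokenizer(sentence, return_tensors='pt').input_ids\n    with torch.no_grad():\n        # With labels=input_ids, the model returns the mean cross-entropy over tokens.\n        loss = model(ids, labels=ids).loss\n    return -loss.item()  # higher = more fluent",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Target Language Model Score",

"sec_num": "2.2.3"

},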
|
{ |
|
"text": "We have two stages for task-specific training, i.e. first with the augmented data and the learning signals, and second with the provided QE dataset (ref.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Section 2.4). As the output to predict for each stage is different, we utilize the following two types of model architectures. Figure 3: The model architecture (left) for the taskspecific finetuning using the provided QE dataset. For each concatenated vector computed within each Score Block (c.f. Fig. 2.) , a Linear Block (right) is added on top of it. The results from the Linear Blocks are concatenated and used to produce the final DA score.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 298, |
|
"end": 306, |
|
"text": "Fig. 2.)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "On top of the specific layer of the pretrained mBERT or XLM models, we attach a series of layers called \"Score Block\" for each type of learning signal as depicted in Figure 2 . We utilize the 9th and 5th layer of the BERT and XLM models, respectively, as these layers are reported to be more semantically relevant (Jawahar et al., 2019; Zhang et al., 2019) . In addition to using the vector representation of the [CLS] token, utilizing the mean-pooled and max-pooled vectors from all tokens further improved the performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 336, |
|
"text": "(Jawahar et al., 2019;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 356, |
|
"text": "Zhang et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 174, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model for Task-Specific Pretraining", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "Once the task-specific pretraining is completed, we begin the finetuning by adding layers above the concatenation layer within each Score Block, as shown in Figure 3 . Thus, we have three concatenated vectors being fed to three \"Linear Blocks\" separately, whose purpose is to reduce the dimensions of the hidden representation, preparing it for the final regression layer.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 165, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model for Task-Specific Finetuning", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "We note that applying dropout (Srivastava et al., 2014) to these linear layers helps with the performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 55, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model for Task-Specific Finetuning", |
|
"sec_num": "2.3.2" |
|
}, |
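
{

"text": "A schematic PyTorch sketch of the two stages described above. The internal layer sizes of the Score and Linear Blocks are not specified in the paper and are placeholders here; what the sketch fixes is the data flow: [CLS], mean-pooled, and max-pooled vectors from an intermediate encoder layer are concatenated per Score Block, and finetuning adds a dropout-regularized Linear Block on each concatenated vector before the final DA regression.\n\nimport torch\nimport torch.nn as nn\n\nclass ScoreBlock(nn.Module):\n    # One block per learning signal; predicts that signal during pretraining.\n    def __init__(self, n):\n        super().__init__()\n        self.head = nn.Sequential(nn.Linear(3 * n, n), nn.Tanh(), nn.Linear(n, 1))\n\n    def forward(self, hidden):                    # hidden: (batch, seq, n)\n        feats = torch.cat([hidden[:, 0],          # [CLS] vector\n                           hidden.mean(dim=1),    # mean-pooled tokens\n                           hidden.max(dim=1).values], dim=-1)\n        return feats, self.head(feats)            # (concatenated vector, signal)\n\nclass DAHead(nn.Module):\n    # Finetuning: a Linear Block per Score Block's concatenated vector, then a\n    # final regression over the concatenated Linear Block outputs.\n    def __init__(self, n, n_signals=3, reduced=64):\n        super().__init__()\n        self.linear_blocks = nn.ModuleList(\n            [nn.Sequential(nn.Linear(3 * n, reduced), nn.ReLU(), nn.Dropout(0.1))\n             for _ in range(n_signals)])\n        self.regressor = nn.Linear(n_signals * reduced, 1)\n\n    def forward(self, concat_vectors):            # list of (batch, 3n) tensors\n        reduced = [blk(v) for blk, v in zip(self.linear_blocks, concat_vectors)]\n        return self.regressor(torch.cat(reduced, dim=-1)).squeeze(-1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model for Task-Specific Finetuning",

"sec_num": "2.3.2"

},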
|
{ |
|
"text": "We experiment with three different types of pretrained models: mBERT 5 , XLM trained with MLM (XLM-MLM) 6 , and XLM trained with causal language modeling (XLM-CLM) 7 . All of the pretrained models are available at Hugging Face.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 105, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Training", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "As the size of the provided QE dataset is small, we make use of the existing parallel data as well as the error-induced synthetic data. For the EN-DE bilingual dataset, we select a subset from this year's training corpora for WMT News Translation Task, summing to just under 10M sentence pairs; for the synthetic dataset, the size is 3.4M.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Pretraining (TSP)", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "Given the concatenated source and target sentences as an input, the model for TSP is trained to predict the three types of learning signals in a multi-task setting by minimizing the sum of the mean squared error losses for each signal (ref. Figure 2) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 250, |
|
"text": "Figure 2)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task-Specific Pretraining (TSP)", |
|
"sec_num": "2.4.1" |
|
}, |
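
{

"text": "In code, this objective is simply the sum of per-signal MSE losses; a minimal sketch, assuming the model emits one scalar prediction per signal:\n\nimport torch.nn.functional as F\n\ndef tsp_loss(predictions, targets):\n    # predictions/targets: dicts keyed by signal name\n    # ('sbert', 'mbert_score', 'lm'), each holding a (batch,) tensor.\n    return sum(F.mse_loss(predictions[k], targets[k]) for k in targets)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Task-Specific Pretraining (TSP)",

"sec_num": "2.4.1"

},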
|
{ |
|
"text": "Once the model is trained with the augmented data, its parameters are loaded to the model for TSF (ref. Figure 3) , and finetuned using the QE dataset. This time, the model learns to predict the mean z-normalized DA score.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 113, |
|
"text": "Figure 3)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task-Specific Finetuning (TSF)", |
|
"sec_num": "2.4.2" |
|
}, |
|
{ |
|
"text": "3 Document-Level QE: MQM Scoring Given a source and its translated document, this task involves identifying translation errors and estimating the translation quality of the document based on the taxonomy of the Multidimensional Quality Metrics (MQM) 8 . With the pre-defined MQM taxonomy, human annotators assess whether the translation satisfies the specifications, and from these annotations, an MQM score is obtained. In this work, we focus on building a system that predicts the MQM score for a given pair of source and translated document.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Finetuning (TSF)", |
|
"sec_num": "2.4.2" |
|
}, |
|
{ |
|
"text": "The major difficulty that we encountered in this task was the lack of training data. As the amount of provided data is limited (8,591 sentence pairs), a model that is solely finetuned on this small-scale data was not capable enough to differentiate sentences with varying level of errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Finetuning (TSF)", |
|
"sec_num": "2.4.2" |
|
}, |
|
{ |
|
"text": "To address this issue, we propose simple yet effective methods for task-specific data augmentation, and task-specific training framework 9 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Finetuning (TSF)", |
|
"sec_num": "2.4.2" |
|
}, |
|
{ |
|
"text": "We generate erroneous sentence pairs and their pseudo-MQM scores from Europarl and QE training corpus in accordance with the MQM taxonomy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Data Augmentation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Out of the 45 error categories specified in QE annotations, we select five frequent categories for which we can automatically perturb the target-side of the parallel corpus at little cost. More details on our data augmentation technique for each category are provided below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating Erroneous Sentence Pairs", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "We introduce an error into the target-side of a sentence pair by randomly omitting one of the French prepositions that exist in the sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Omitted Preposition", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Omitted Determiner The same process is done for French determiners as for prepositions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Omitted Preposition", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We replace a French preposition with another one. When more than one candidate exists, we choose one at random.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Wrong Preposition", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We exploit grammatical pattern that most descriptive adjectives go after the noun in French sentences (unlike English ones). Using an in-house French POS tagger, we identify post-nominal adjectives and place them in front of the corresponding nouns so that they are now pre-nominal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Order", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We mask-out target tokens at random positions, and substitute them with tokens predicted by the Camembert language model (Martin et al., 2020). Table 2 : Examples of erroneous sentence pairs generated from the WMT20 QE corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 151, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Lexical Selection", |
|
"sec_num": null |
|
}, |
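
{

"text": "An illustrative sketch of the preposition-based perturbations, using a small hand-picked list of French prepositions; the in-house POS tagger and the CamemBERT mask-filling step are not reproduced here, and the function names are ours.\n\nimport random\n\nFRENCH_PREPOSITIONS = ['de', 'dans', 'chez', 'pour', 'sur', 'avec', 'sans', 'sous']\n\ndef omit_preposition(tokens):\n    # Omitted Preposition: drop one preposition occurring in the sentence.\n    idx = [i for i, t in enumerate(tokens) if t.lower() in FRENCH_PREPOSITIONS]\n    if not idx:\n        return tokens, 0                   # no perturbation possible\n    i = random.choice(idx)\n    return tokens[:i] + tokens[i + 1:], 1  # (sentence, errors introduced)\n\ndef wrong_preposition(tokens):\n    # Wrong Preposition: replace one preposition with a random different one.\n    idx = [i for i, t in enumerate(tokens) if t.lower() in FRENCH_PREPOSITIONS]\n    if not idx:\n        return tokens, 0\n    i = random.choice(idx)\n    out = tokens.copy()\n    out[i] = random.choice([p for p in FRENCH_PREPOSITIONS if p != out[i].lower()])\n    return out, 1",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Generating Erroneous Sentence Pairs",

"sec_num": "3.1.1"

},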
|
{ |
|
"text": "Once we introduce different types of errors into the target-side sentences, the next step is to obtain pseudo-MQM scores for the altered sentence pairs. Two key elements for computing MQM score are the length of a text, and its total error severity as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Learning Signal", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "Pseudo-MQM = 100(1 \u2212 5.0 * n error + S N )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Learning Signal", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "where N indicates the length of given target sentence and n error denotes the number of errors introduced in it. We assign 5.0, the most frequent severity, to each perturbation that we make. If an error severity score, S, is assigned to the sentence by human annotators, we add this score to compute the total error severity score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Learning Signal", |
|
"sec_num": "3.1.2" |
|
}, |
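
{

"text": "The formula translates directly into code; a minimal sketch, reproducing a worked value from Table 1:\n\ndef pseudo_mqm(n_tokens, n_errors, human_severity=0.0, severity_per_error=5.0):\n    # Pseudo-MQM = 100 * (1 - (5.0 * n_errors + S) / N), where S is any severity\n    # already assigned by human annotators and N is the sentence length.\n    return 100.0 * (1.0 - (severity_per_error * n_errors + human_severity) / n_tokens)\n\nprint(round(pseudo_mqm(21, 1), 1))  # 76.2, matching Table 1",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Task-Specific Learning Signal",

"sec_num": "3.1.2"

},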
|
{ |
|
"text": "We use pretrained mBERT or XLM 10 as initial parameters. The concatenation of a source sentence and its corresponding target sentence with special symbol tokens is taken as input:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "[CLS] source [SEP] target [SEP].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
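
{

"text": "With the Hugging Face tokenizers, this input format is obtained by passing the two sentences as a pair; a quick sketch using bert-base-multilingual-cased (the XLM tokenizer inserts its own special symbols instead), with a made-up example pair:\n\nfrom transformers import AutoTokenizer\n\ntok = AutoTokenizer.from_pretrained('bert-base-multilingual-cased')\nenc = tok('This would put France in a delicate position.',\n          'Cela placerait la France dans une situation d\u00e9licate.')\nprint(tok.convert_ids_to_tokens(enc.input_ids))  # [CLS] source [SEP] target [SEP]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": "3.2"

},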
|
{ |
|
"text": "We experiment with two strategies for obtaining sentence embeddings. First, we feed a hidden state vector corresponding to [CLS] token (h [CLS] ) to a linear layer to compute a sentence-level MQM prediction of\u0177:\u0177", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 143, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "= W h [CLS] + b", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where W and b are the weight matrix and bias vector of the linear layer, respectively. For the other 10 xlm-mlm-enfr-1024 method, we use the concatenation of a mean-pooled source representation (s \u2208 R n ), mean-pooled target representation (t \u2208 R n ) and their element-wise differences (|s \u2212 t| \u2208 R n ) in an attempt to enlarge the model capacity:", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 103, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "y = W \u2022 ReLU(W r (s, t, |s \u2212 t|) + b r ) + b", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where W r \u2208 R 3n\u00d7n and b r are the weight matrix and bias vector of an intermediate dimensionreducing layer, respectively, and n denotes the dimension of hidden vectors. W and b are the weight matrix and bias vector of the final linear layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
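
{

"text": "A minimal PyTorch sketch of the two heads, with n the encoder hidden size; the masked mean-pooling and the layer shapes follow the equations above, while the class names and masking details are ours.\n\nimport torch\nimport torch.nn as nn\n\nclass ClsHead(nn.Module):\n    # First strategy: linear regression on the [CLS] hidden state.\n    def __init__(self, n):\n        super().__init__()\n        self.linear = nn.Linear(n, 1)\n\n    def forward(self, hidden):                # hidden: (batch, seq, n)\n        return self.linear(hidden[:, 0]).squeeze(-1)\n\nclass PooledDiffHead(nn.Module):\n    # Second strategy: concat(mean(s), mean(t), |s - t|) -> ReLU -> linear.\n    def __init__(self, n):\n        super().__init__()\n        self.reduce = nn.Linear(3 * n, n)     # W_r in R^{3n x n}\n        self.out = nn.Linear(n, 1)            # W, b\n\n    @staticmethod\n    def _masked_mean(hidden, mask):\n        m = mask.float().unsqueeze(-1)        # (batch, seq, 1) token selector\n        return (hidden * m).sum(1) / m.sum(1).clamp(min=1.0)\n\n    def forward(self, hidden, src_mask, tgt_mask):\n        s = self._masked_mean(hidden, src_mask)   # mean-pooled source tokens\n        t = self._masked_mean(hidden, tgt_mask)   # mean-pooled target tokens\n        feats = torch.cat([s, t, (s - t).abs()], dim=-1)\n        return self.out(torch.relu(self.reduce(feats))).squeeze(-1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": "3.2"

},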
|
{ |
|
"text": "We suggest that the pretraining objective should be similar to that of the downstream task in order to mitigate the pretrain-finetune discrepancy (Yang et al., 2019) , and fully leverage the erroneous sentence pairs that we generated. For this task, both phases minimize the mean-squared loss function:", |
|
"cite_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 165, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "l = 1 K K k=1 y k \u2212\u0177 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We utilize Europarl parallel corpus (English-French) to pretrain our submitted models 11 . To acquire high quality data, we carried out the following filtering processes: (1) language detection (filtering out non-English sentences in the sourceside, and non-French sentences in the target-side), (2) length ratio filtering (eliminating sentence pairs with length ratio greater than 1.8).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Pretraining (TSP)", |
|
"sec_num": "3.3.1" |
|
}, |
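
{

"text": "A sketch of the two filters; langdetect is used here as a stand-in language identifier (the paper does not name one), and the symmetric length ratio is our reading of the 1.8 threshold.\n\nfrom langdetect import detect  # stand-in language identifier\n\ndef keep_pair(src, tgt, max_ratio=1.8):\n    # (1) language detection on both sides, (2) length-ratio filtering.\n    try:\n        if detect(src) != 'en' or detect(tgt) != 'fr':\n            return False\n    except Exception:  # langdetect raises on empty or degenerate input\n        return False\n    ls, lt = len(src.split()), len(tgt.split())\n    return max(ls, lt) / max(1, min(ls, lt)) <= max_ratio",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Task-Specific Pretraining (TSP)",

"sec_num": "3.3.1"

},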
|
{ |
|
"text": "We assume that the remaining sentence pairs do not contain any translation error. Therefore, we assign the total error severity score of zero to these pairs before the augmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Pretraining (TSP)", |
|
"sec_num": "3.3.1" |
|
}, |
|
{ |
|
"text": "About 15.2 million examples 12 are generated with the above-mentioned data augmentation techniques. The detailed examples are provided in Table 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Pretraining (TSP)", |
|
"sec_num": "3.3.1" |
|
}, |
|
{ |
|
"text": "The next step is to finetune our model using the augmented QE train data. Unlike Europarl corpus, we can fully leverage the MQM scores originally assigned to the QE training dataset. We found that performing the data augmentation with three categories (Omitted Determiner, Omitted Preposition, and Wrong Preposition) effectively improves the performance. The original QE training sentence pairs represent about 5% of 169,997 sentence pairs obtained from the data augmentation. We also provide the augmented examples for QE training data in Table 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 252, |
|
"end": 316, |
|
"text": "(Omitted Determiner, Omitted Preposition, and Wrong Preposition)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 540, |
|
"end": 547, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task-Specific Finetuning (TSF)", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "Since the learning objective is identical to that of the pretraining phase, we can simply train the same model with the augmented downstream task data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-Specific Finetuning (TSF)", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "We specify that the models are trained at sentencelevel, learning to predict the non-truncated version of MQM scores which could take a range between negative infinity and 100; this is to avoid potential information loss that could arise from the truncation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Document-Level MQM Score", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Given a document, the document-level MQM score is computed from its sentence-level MQM predictions in a closed form. Afterwards, we truncate negative values to zero. Task Table 3 shows the Pearson correlation coefficient between the predicted z-normalized DA scores and the reference scores on the development set. We note that the number of parameters for PATQUEST-mBERT (724M) is greater than that of PATQUEST-XLM (616M) models, resulting in the difference in the correlation scores. Nevertheless, computing the arithmetic mean of the scores produced by these three models improves the performance (PATQUEST-ensemble).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 180, |
|
"text": "Task Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Document-Level MQM Score", |
|
"sec_num": "3.4" |
|
}, |
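
{

"text": "Returning to the closed-form document-level aggregation of Section 3.4, which the paper does not spell out: one reading consistent with the Pseudo-MQM definition in Section 3.1.2 is a length-weighted pooling of per-sentence penalty mass, truncated at zero afterwards. The sketch below implements that reading and should be taken as our assumption, not the authors' exact formula.\n\ndef document_mqm(sentence_scores, sentence_lengths):\n    # Convert each sentence score back to a penalty mass, pool over the\n    # document, and truncate the final score at zero (assumed aggregation).\n    total_len = sum(sentence_lengths)\n    penalty = sum((1.0 - y / 100.0) * n\n                  for y, n in zip(sentence_scores, sentence_lengths))\n    return max(0.0, 100.0 * (1.0 - penalty / total_len))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Document-Level MQM Score",

"sec_num": "3.4"

},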
|
{ |
|
"text": "The final result on the QE test set is shown in Table 4 . We observe that finetuning the model with the additional error-induced synthetic data improves the performance as well as ensembling the models. Our final submitted system (PATQUESTensemble) finished 4th out of the 15 submitted systems 13 in the final ranking of the sentence-level QE task for English-German. In order to train a generally applicable QE system, we did not make use of the data such as internal information from the NMT models and in-domain Wikipedia texts that could be extracted from the provided Wikipedia titles.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 55, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentence-Level", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The validation results on development set are shown in Table 5 . Both PATQUEST-mBERT and PATQUEST-XLM models use representations from [CLS] token. We build another two models, PATQUEST-mBERT variant 1 and 2, using the concatenations of mean-pooled source representations, mean-pooled target representations, and their element-wise differences. Table 6 shows the test results of our submitted PATQUEST models. For PATQUEST-ensemble, we compute an average from the four models enumerated in Table 5 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 62, |
|
"text": "Table 5", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 351, |
|
"text": "Table 6", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 496, |
|
"text": "Table 5", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Document-Level Task", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In Table 7 , the effectiveness of our training scheme and data augmentation techniques is illustrated via an ablation study. Note that \"Pretrained mBERT (A)\" in the model that is finetuned on the original QE data without any task-specific training. Both TSP and TSF enhance the generalization ability of model. Note that the mBERT model trained via TSP and TSF, \"A + TSP + TSF\", is the same model as PATQUEST-mBERT which itself achieves a significant improvement over the baselines as shown in Table 6 . Our final system (PATQUEST-ensemble) submitted for the document-level QE task, came 1st out of the three submitted systems 14 . Similar to our sentence-level system, our document-level system also did not utilize any internal information from the NMT models and in-domain Wikipedia data tailored to the benchmark.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 7", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 501, |
|
"text": "Table 6", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Document-Level Task", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In this paper, we present a task-specific pretraining scheme for the QE task. Our pretraining objective is devised so that it is closely related (Task 1) or identical (Task 3) to the finetuning objective. In addition, the models are exposed to abundant amount of error-induced translations generated from large parallel corpora, effectively alleviating the issue of 14 data scarcity. Our proposed models yield significant improvement over the baseline systems for the two tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 366, |
|
"end": 368, |
|
"text": "14", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "distiluse-base-multilingual-cased, https://github.com/UKPLab/ sentence-transformers", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "gpt2-german-345M-r20191119, http://zamia-speech.org/brain", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "bert-base-multilingual-cased6 xlm-mlm-ende-1024 7 xlm-clm-ende-1024 8 http://www.qt21.eu/mqm-definition", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The code will be available at https://github.com/ naver/PATQUEST.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We perform TSP after bringing pretrained parameters of language models as initial weights.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The size of the original Europarl English-French parallel corpus is about 2M sentence pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Excluding the disqualified team.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Authors would like to thank St\u00e9phane Clinchant, Vassilina Nikoulina, and Jaesong Lee for the insightful discussions, and Papago team members for offering the fruitful feedback. We would also like to extend our gratitude to Won Ik Cho for coming up with the awesome name for our system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Crosslingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7059--7069", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau and Guillaume Lample. 2019. Cross- lingual language model pretraining. In Advances in Neural Information Processing Systems, pages 7059-7069.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "What does BERT learn about the structure of language", |
|
"authors": [ |
|
{ |
|
"first": "Ganesh", |
|
"middle": [], |
|
"last": "Jawahar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Sagot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djam\u00e9", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3651--3657", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ganesh Jawahar, Beno\u00eet Sagot, and Djam\u00e9 Seddah. 2019. What does BERT learn about the structure of language? In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 3651-3657, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A new measure of rank correlation", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Maurice", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kendall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1938, |
|
"venue": "Biometrika", |
|
"volume": "30", |
|
"issue": "1", |
|
"pages": "81--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maurice G Kendall. 1938. A new measure of rank cor- relation. Biometrika, 30(1/2):81-93.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Unbabel's participation in the wmt19 translation quality estimation shared task", |
|
"authors": [ |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Kepler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonay", |
|
"middle": [], |
|
"last": "Tr\u00e9nous", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Treviso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Vera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ant\u00f3nio", |
|
"middle": [], |
|
"last": "G\u00f3is", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Amin Farajian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Ant\u00f3nio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9 Ft", |
|
"middle": [], |
|
"last": "Lopes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "78--84", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabio Kepler, Jonay Tr\u00e9nous, Marcos Treviso, Miguel Vera, Ant\u00f3nio G\u00f3is, M Amin Farajian, Ant\u00f3nio V Lopes, and Andr\u00e9 FT Martins. 2019. Unbabel's par- ticipation in the wmt19 translation quality estima- tion shared task. In Proceedings of the Fourth Con- ference on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 78-84.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Qe bert: Bilingual bert using multi-task learning for neural quality estimation", |
|
"authors": [ |
|
{ |
|
"first": "Hyun", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joon-Ho", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyun-Ki", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seung-Hoon", |
|
"middle": [], |
|
"last": "Na", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Conference on Machine Translation", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "85--89", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hyun Kim, Joon-Ho Lim, Hyun-Ki Kim, and Seung- Hoon Na. 2019. Qe bert: Bilingual bert using multi-task learning for neural quality estimation. In Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 85-89.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Camembert: a tasty french language model", |
|
"authors": [ |
|
{ |
|
"first": "Louis", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Muller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro Javier Ortiz", |
|
"middle": [], |
|
"last": "Su\u00e1rez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoann", |
|
"middle": [], |
|
"last": "Dupont", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Romary", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Louis Martin, Benjamin Muller, Pedro Javier Or- tiz Su\u00e1rez, Yoann Dupont, Laurent Romary, Eric Villemonte de la Clergerie, Djam\u00e9 Seddah, and Beno\u00eet Sagot. 2020. Camembert: a tasty french lan- guage model. In Proceedings of the 58th Annual Meeting of the Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "OpenAI Blog", |
|
"volume": "1", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8):9.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.10084" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. arXiv preprint arXiv:1908.10084.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bleurt: Learning robust metrics for text generation", |
|
"authors": [ |
|
{ |
|
"first": "Thibault", |
|
"middle": [], |
|
"last": "Sellam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur P", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.04696" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thibault Sellam, Dipanjan Das, and Ankur P Parikh. 2020. Bleurt: Learning robust metrics for text gen- eration. arXiv preprint arXiv:2004.04696.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Findings of the wmt 2020 shared task on quality estimation", |
|
"authors": [ |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Blain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marina", |
|
"middle": [], |
|
"last": "Fomicheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erick", |
|
"middle": [], |
|
"last": "Fonseca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9 Ft", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fifth Conference on Machine Translation: Shared Task Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucia Specia, Fr\u00e9d\u00e9ric Blain, Marina Fomicheva, Er- ick Fonseca, Vishrav Chaudhary, Francisco Guzm\u00e1n, and Andr\u00e9 FT Martins. 2020. Findings of the wmt 2020 shared task on quality estimation. In Proceed- ings of the Fifth Conference on Machine Translation: Shared Task Papers.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Dropout: A simple way to prevent neural networks from overfitting", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "15", |
|
"issue": "56", |
|
"pages": "1929--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: A simple way to prevent neural networks from overfitting. Journal of Machine Learning Re- search, 15(56):1929-1958.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Russ", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5753--5763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5753-5763.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Bertscore: Evaluating text generation with bert", |
|
"authors": [ |
|
{ |
|
"first": "Tianyi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varsha", |
|
"middle": [], |
|
"last": "Kishore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Kilian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.09675" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. Bertscore: Eval- uating text generation with bert. arXiv preprint arXiv:1904.09675.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Overview of our approach for Task 1 and 3.", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table><tr><td>Error name</td><td>Sentence</td><td>Length</td><td>Total error severity</td><td>Pseudo MQM</td></tr><tr><td>Original sentence</td><td>Vous avez souhait\u00e9 un d\u00e9bat\u00e0 ce sujet dans les prochains jours, au cours de cette p\u00e9riode de session.</td><td>21</td><td>0</td><td>100.0</td></tr><tr><td colspan=\"3\">(1) 21</td><td>5</td><td>76.2</td></tr><tr><td colspan=\"3\">(2) 21</td><td>5</td><td>76.2</td></tr><tr><td>(1)+(2)</td><td/><td>20</td><td>10</td><td>52.4</td></tr><tr><td>Original sentence</td><td colspan=\"2\">Cela placerait l'23</td><td>0</td><td>100.0</td></tr><tr><td>(1) Word Order</td><td colspan=\"2\">Cela placerait l'23</td><td>5</td><td>78.3</td></tr><tr><td>(2) Lexical Selection</td><td colspan=\"2\">Cela placerait l'23</td><td>5</td><td>78.3</td></tr><tr><td>(1)+(2)</td><td>Cela placerait l'UE dans une situation inconfortable vis-\u00e0-vis de ces pays et de la internationale communaut\u00e9.</td><td>23</td><td>10</td><td>56.5</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Wrong Preposition Vous avez souhait\u00e9 un d\u00e9bat\u00e0 ce sujet chez les prochains jours, au cours de cette p\u00e9riode de session. Omit Determiner Vous avez souhait\u00e9 un d\u00e9bat\u00e0 ce sujet dans les prochains jours, au cours de cette p\u00e9riode de session. Vous avez souhait\u00e9 un d\u00e9bat\u00e0 ce sujet chez les prochains jours, au cours de cette p\u00e9riode de session. UE dans une situation d\u00e9licate vis-\u00e0-vis de ces pays et de la communaut\u00e9 internationale. UE dans une situation d\u00e9licate vis-\u00e0-vis de ces pays et de la internationale communaut\u00e9. UE dans une situation inconfortable vis-\u00e0-vis de ces pays et de la communaut\u00e9 internationale.", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"content": "<table><tr><td>Error name</td><td>Sentence</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Examples of erroneous sentence pairs generated from the Europarl corpus. Wrong Preposition son travail a\u00e9t\u00e9 pr\u00e9sent\u00e9 pour le washington post, quotidien bonbons, washingtonian, fit yoga et journal d'yoga.", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"3\">Pearson's r \u2191 MAE \u2193 RMSE \u2193</td></tr><tr><td>Baseline</td><td>0.146</td><td>0.679</td><td>0.967</td></tr><tr><td>PATQUEST-mBERT w/o synth. data</td><td>0.429</td><td>0.462</td><td>0.632</td></tr><tr><td>PATQUEST-ensemble w/o synth. data</td><td>0.457</td><td>0.464</td><td>0.640</td></tr><tr><td>PATQUEST-ensemble</td><td>0.498</td><td>0.454</td><td>0.637</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Results on the development set for Task 1 EN-DE.", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Submission results on the test set for Task 1 EN-DE.", |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"3\">Pearson's r \u2191 MAE \u2193 RMSE \u2193</td></tr><tr><td>PATQUEST-mBERT</td><td>0.431</td><td>14.401</td><td>22.330</td></tr><tr><td>PATQUEST-mBERT variant 1</td><td>0.406</td><td>14.418</td><td>22.872</td></tr><tr><td>PATQUEST-mBERT variant 2</td><td>0.380</td><td>14.909</td><td>23.215</td></tr><tr><td>PATQUEST-XLM</td><td>0.374</td><td>16.245</td><td>23.647</td></tr></table>", |
|
"type_str": "table", |
|
"text": "table refers to the mBERT", |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"3\">Pearson's r \u2191 MAE \u2193 RMSE \u2193</td></tr><tr><td>Baseline</td><td>0.389</td><td>19.939</td><td>26.608</td></tr><tr><td>PATQUEST-mBERT</td><td>0.529</td><td>16.214</td><td>24.437</td></tr><tr><td>PATQUEST-XLM</td><td>0.546</td><td>15.821</td><td>23.846</td></tr><tr><td>PATQUEST-ensemble</td><td>0.573</td><td>15.611</td><td>23.327</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Results on the development set of WMT20 document-level task.", |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Submission results of PATQUEST models on the test set of WMT20 document-level task.", |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"3\">Pearson's r \u2191 MAE \u2193 RMSE \u2193</td></tr><tr><td>Pretrained mBERT (A)</td><td>0.263</td><td>16.146</td><td>23.090</td></tr><tr><td>A + TSF</td><td colspan=\"2\">0.341 (+ 0.078) 15.302</td><td>23.749</td></tr><tr><td>A + TSP</td><td colspan=\"2\">0.375 (+ 0.112) 15.496</td><td>23.444</td></tr><tr><td>A + TSP + TSF</td><td colspan=\"2\">0.431 (+ 0.168) 14.401</td><td>22.330</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Excluding the disqualified team", |
|
"html": null |
|
}, |
|
"TABREF9": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Results on the development set of WMT20 document-level task adding up key components of our model.", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |