|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:40:59.989827Z" |
|
}, |
|
"title": "RTM Ensemble Learning Results at Quality Estimation Task", |
|
"authors": [ |
|
{ |
|
"first": "Ergun", |
|
"middle": [], |
|
"last": "Bi\u00e7ici", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Bogazi\u00e7i University orcid", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We obtain new results using referential translation machines (RTMs) with predictions mixed and stacked to obtain a better mixture of experts prediction. We are able to achieve better results than the baseline model in Task 1 subtasks. Our stacking results significantly improve the results on the training sets but decrease the test set results. RTMs can achieve to become the 5th among 13 models in ru-en subtask and 5th in the multilingual track of sentence-level Task 1 based on MAE.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We obtain new results using referential translation machines (RTMs) with predictions mixed and stacked to obtain a better mixture of experts prediction. We are able to achieve better results than the baseline model in Task 1 subtasks. Our stacking results significantly improve the results on the training sets but decrease the test set results. RTMs can achieve to become the 5th among 13 models in ru-en subtask and 5th in the multilingual track of sentence-level Task 1 based on MAE.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Quality estimation task in WMT20 (Specia et al., 2020 ) (QET20) address machine translation (MT) performance prediction (MTPP), where translation quality is predicted without using reference translations, at the sentence-(Tasks 1 and 2), word-(Task 2), and document-levels (Task 3). Task 1 predicts the sentence-level direct assessment (DA) in 7 language pairs categorized according to the MT resources available:", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 53, |
|
"text": "(Specia et al., 2020", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 high-resource, English-German (en-de), English-Chinese (en-zh), and Russian-English (en-ru)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 medium-resource, Romanian-English (roen) and Estonian-English (et-en) , and", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 71, |
|
"text": "Estonian-English (et-en)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 low-resource, Sinhalese-English (si-en) and Nepalese-English (ne-en).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "en-ru contains sentences from both Wikipedia and Reddit articles while others use only Wikipedia sentences with 7000 sentences for training, 1000 for development, and 1000 for testing. The target to predict in Task 1 is z-standardised DA scores, which changes the range from [0, 100] for DA scores to [3.178, \u22127.542] in z-standardized DA scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Train Test setting Training LM Task 1 (en-de) 8000 1000 bilingual 0.3 M 5 M Task 1 (en-zh) 8000 1000 monolingual en 0.2 M 3.5 M Task 1 (si-en) 8000 1000 monolingual en 0.2 M 3.5 M Task 1 (ne-en) 8000 1000 monolingual en 0.2 M 3.5 M Task 1 (et-en) 8000 1000 monolingual en 0.2 M 3.5 M Task 1 (ro-en) 8000 1000 monolingual en 0.2 M 3.5 M Task 1 (ru-en) 8000 1000 bilingual 0.2 M 4 M Task 2 (en-de) 8000 1000 bilingual 0.3 M 5 M Task 2 (en-zh) 8000 1000 monolingual en 0.2 M 3.5 M The target to predict in Task 2 is sentence HTER (human-targeted translation edit rate) scores (Snover et al., 2006) and binary classification of word-level translation errors. We participated in sentence-level subtasks, which include English-German and English-Chinese in Task 2. Table 1 lists the number of sentences in the training and test sets for each task and the number of instances used as interpretants in the referential translation machine (RTM) (Bi\u00e7ici, 2018; Bi\u00e7ici and Way, 2015) models (M for million).", |
|
"cite_spans": [ |
|
{ |
|
"start": 573, |
|
"end": 594, |
|
"text": "(Snover et al., 2006)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 936, |
|
"end": 950, |
|
"text": "(Bi\u00e7ici, 2018;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 951, |
|
"end": 972, |
|
"text": "Bi\u00e7ici and Way, 2015)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 759, |
|
"end": 766, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "RTM interpretants Task", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We tokenize and truecase all of the corpora using Moses' (Koehn et al., 2007) processing tools. 1 LMs are built using kenlm (Heafield et al., 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 77, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 147, |
|
"text": "(Heafield et al., 2013)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RTM interpretants Task", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use RTM models for building our prediction models. RTMs predict data translation between the instances in the training set and the test set using interpretants, data selected close to the task instances in bilingual training settings or monolingual language model (LM) settings. Interpretants provide context for the prediction task and are used during the derivation of the features measuring the closeness of the test sentences to the Figure 1 : RTM depiction: parfwd selects interpretants close to the training and test data using parallel corpus in bilingual settings and monolingual corpus in the target language or just the monolingual target corpus in monolingual settings; an MTPPS use interpretants and training data to generate training features and another use interpretants and test data to generate test features in the same feature space; learning and prediction takes place using these features as input.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 440, |
|
"end": 448, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "RTM for MTPP", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "training data, the difficulty of translating them, and to identify translation acts between any two data sets for building prediction models. With the enlarging parallel and monolingual corpora made available by WMT, the capability of the interpretant datasets selected to provide context for the training and test sets improve as can be seen in the data statistics of parfwd instance selection, parallel feature weight decay (Bi\u00e7ici, 2019) . RTMs use parfwd for instance selection and machine translation performance prediction system (MTPPS) (Bi\u00e7ici et al., 2013; Bi\u00e7ici and Way, 2015) for obtaining the features, which includes additional features from word alignment. Figure 1 depicts RTMs and explains the model building process.", |
|
"cite_spans": [ |
|
{ |
|
"start": 426, |
|
"end": 440, |
|
"text": "(Bi\u00e7ici, 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 565, |
|
"text": "(Bi\u00e7ici et al., 2013;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 566, |
|
"end": 587, |
|
"text": "Bi\u00e7ici and Way, 2015)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 672, |
|
"end": 680, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "RTM for MTPP", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Additionally, we included the sum, mean, standard deviation, minimum, and maximum of alignment word log probabilities as features in Task 1. In Task 2, we included word alignment displacement features including the average of source and target displacements relative to the length of the source or target sentences respectively and absolute displacement relative to the maximum of source and target sentence lengths.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RTM for MTPP", |
|
"sec_num": "2" |
|
}, |
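
{

"text": "A minimal sketch of these alignment features, assuming alignments are given as (source index, target index) pairs and the log probabilities as a list of floats; the function names, the input format, and the exact definition of displacement are illustrative assumptions, not the original implementation:\n\nimport numpy as np\n\ndef alignment_logprob_features(logps):\n    # Task 1 features: sum, mean, standard deviation, min, max of alignment word log probabilities.\n    a = np.asarray(logps, dtype=float)\n    return [a.sum(), a.mean(), a.std(), a.min(), a.max()]\n\ndef alignment_displacement_features(links, src_len, tgt_len):\n    # Task 2 features; links are (i, j) word-alignment index pairs.\n    if not links:\n        return [0.0, 0.0, 0.0]\n    d = np.array([i - j for i, j in links], dtype=float)  # displacement of each link\n    return [np.abs(d).mean() / src_len,                   # relative to source length\n            np.abs(d).mean() / tgt_len,                   # relative to target length\n            np.abs(d).mean() / max(src_len, tgt_len)]     # relative to max sentence length",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "RTM for MTPP",

"sec_num": "2"

},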
|
{ |
|
"text": "Instead of resource based discernment, we treated en-de of Tasks 1 and 2 and ru-en as bilingual tasks where significant parallel corpora are available from WMT from previous years and the rest as monolingual, using solely English side of the corpora for deriving MTPP features. In accord, we treat en-de and ru-en as parallel MTPP and the rest as monolingual MTPP. RTM benefits from relevant data selection to be used as interpretants in both monolingual and bilingual settings. The related monolingual or bilingual datasets are used during feature extraction for the machine learning models of MT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RTM for MTPP", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The machine learning models we use include ridge regression (RR), kernel ridge regression, support vector regression (SVR) (Boser et al., 1992) , gradient tree boosting, extremely randomized trees (Geurts et al., 2006) , and multi-layer perceptron (Bishop, 2006) as learning models in combination with feature selection (FS) (Guyon et al., 2002) and partial least squares (PLS) (Wold et al., 1984) where most of these models can be found in scikit-learn. 2 We experiment with:", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 143, |
|
"text": "(Boser et al., 1992)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 218, |
|
"text": "(Geurts et al., 2006)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 248, |
|
"end": 262, |
|
"text": "(Bishop, 2006)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 345, |
|
"text": "(Guyon et al., 2002)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 397, |
|
"text": "(Wold et al., 1984)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 456, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RTM for MTPP", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 including the statistics of the binary tags obtained as features extracted from word-level tag predictions for sentence-level prediction,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RTM for MTPP", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 using RR to estimate the noise level for SVR, which obtains accuracy with 5% error compared with estimates obtained with known noise level (Cherkassky and Ma, 2004) and set = \u03c3/2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 166, |
|
"text": "(Cherkassky and Ma, 2004)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RTM for MTPP", |
|
"sec_num": "2" |
|
}, |
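
{

"text": "A minimal sketch of this noise-estimation step, assuming \u03c3 is taken as the standard deviation of the RR residuals on the training set; the hyperparameter values shown are illustrative:\n\nimport numpy as np\nfrom sklearn.linear_model import Ridge\nfrom sklearn.svm import SVR\n\ndef fit_svr_with_rr_noise(X, y):\n    # Estimate the noise level from ridge regression residuals (Cherkassky and Ma, 2004).\n    rr = Ridge(alpha=1.0).fit(X, y)\n    sigma = np.std(y - rr.predict(X))\n    # Set the SVR tube width to sigma / 2 as in the text.\n    return SVR(C=1.0, epsilon=sigma / 2.0).fit(X, y)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "RTM for MTPP",

"sec_num": "2"

},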
|
{ |
|
"text": "We use Pearson's correlation (r), mean absolute error (MAE), root mean squared error (RMSE), relative absolute error (RAE), relative MAE (MAER), and mean RAE relative (MRAER) as evaluation metrics (Bi\u00e7ici and Way, 2015) . Our best non-mix results are in Table 2 achieving 6th rank at best among 15 models in general.", |
|
"cite_spans": [ |
|
{ |
|
"start": 197, |
|
"end": 219, |
|
"text": "(Bi\u00e7ici and Way, 2015)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 261, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "RTM for MTPP", |
|
"sec_num": "2" |
|
}, |
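
{

"text": "A minimal sketch of these metrics; MAER and MRAER follow our reading of Bi\u00e7ici and Way (2015), with denominators floored by a small constant to avoid division by zero (the flooring value is an assumption):\n\nimport numpy as np\nfrom scipy.stats import pearsonr\n\ndef eval_metrics(y, yhat, floor=1e-8):\n    y, yhat = np.asarray(y, dtype=float), np.asarray(yhat, dtype=float)\n    ae = np.abs(yhat - y)\n    return {\n        'r': pearsonr(y, yhat)[0],\n        'MAE': ae.mean(),\n        'RMSE': np.sqrt(((yhat - y) ** 2).mean()),\n        'RAE': ae.sum() / np.abs(y - y.mean()).sum(),\n        'MAER': (ae / np.maximum(np.abs(y), floor)).mean(),\n        'MRAER': (ae / np.maximum(np.abs(y - y.mean()), floor)).mean(),\n    }",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "RTM for MTPP",

"sec_num": "2"

},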
|
{ |
|
"text": "We use prediction averaging (Bi\u00e7ici, 2018) to obtain a combined prediction from various prediction outputs better than the components, where the performance on the training set is used to obtain r P MAE RMSE Task 1 en-de 0.2622 (11) 0.5156 (8) 0.6828 (10) ru-en 0.6877 (8) 0.5138 (6) 0.6878 (7) en-zh 0.2310 (13) 0.5616 (6) 0.7298 (6) et-en 0.6067 (11) 0.5995 (8) 0.7284 (8) ne-en 0.5436 (11) 0.5308 (9) 0.6828 (9) si-en 0.5318 (10) 0.5003 (7) 0.6181 7ro-en 0.6990 11 weighted average of the top k predictions,\u0177 with evaluation metrics indexed by j \u2208 J and weights with w:", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 42, |
|
"text": "(Bi\u00e7ici, 2018)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 214, |
|
"text": "Task 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Mixture of Experts Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "w j,i = w j,i 1\u2212w j,\u00ee \u0177 \u0177 y \u00b5 k = 1 k k i=1\u0177 \u0177 y i MEAN \u0177 \u0177 y j,w j k = 1 k i=1 w j,i k i=1 w j,i\u0177 \u0177 y \u00ee \u0177 \u0177 y k = 1 |J| j\u2208J\u0177 \u0177 y j,w j k MIX", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mixture of Experts Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(1) We assume independent predictions and use p i /(1 \u2212 p i ) for weights where p i represents the accuracy of the independent classifier i in a weighted majority ensemble (Kuncheva and Rodr\u00edguez, 2014) . We use the MIX prediction only when we obtain better results on the training set. We select the best model using r and mix the results using r, RAE, MRAER, and MAER. We filter out those results with higher than 0.875 relative evaluation metric scores.", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 202, |
|
"text": "(Kuncheva and Rodr\u00edguez, 2014)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mixture of Experts Models", |
|
"sec_num": "3" |
|
}, |
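
{

"text": "A minimal sketch of Equation 1, assuming each expert i has a per-metric training-set score w[j][i] in (0, 1) that is mapped through w/(1-w); top-k selection is omitted for brevity:\n\nimport numpy as np\n\ndef mix(preds, metric_weights):\n    # preds: (m, n) array of m expert predictions; metric_weights: one (m,) score array per metric j in J.\n    preds = np.asarray(preds, dtype=float)\n    combos = []\n    for w in metric_weights:\n        odds = np.asarray(w, dtype=float)\n        odds = odds / (1.0 - odds)                 # w_{j,i} / (1 - w_{j,i})\n        combos.append(odds @ preds / odds.sum())   # weighted average over experts\n    return np.mean(combos, axis=0)                 # average over metrics (MIX)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Mixture of Experts Models",

"sec_num": "3"

},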
|
{ |
|
"text": "We also use generalized ensemble method (GEM) as an alternative to MIX to combine using weights and correlation of the errors, C i,j , where GEM achieves smaller error than the best combined model (Perrone and Cooper, 1992) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 197, |
|
"end": 223, |
|
"text": "(Perrone and Cooper, 1992)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mixture of Experts Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "y GEM = L i=1 w i \u03c8 i (x) = y + L i=1 w i i C i,j = E[ i , j ] = (\u03c8 i (x) \u2212 y) T (\u03c8 i (x) \u2212 y) w i = L j=1 C i,j L k=1 L j=1 C k,j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mixture of Experts Models", |
|
"sec_num": "3" |
|
}, |
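
{

"text": "A minimal sketch of the GEM weights, assuming the Perrone and Cooper (1992) form with the inverse of the error covariance matrix C; the small ridge term added before inversion is a numerical-stability assumption:\n\nimport numpy as np\n\ndef gem_weights(preds, y, ridge=1e-8):\n    # preds: (L, n) training predictions of L models; y: (n,) targets.\n    E = np.asarray(preds, dtype=float) - np.asarray(y, dtype=float)  # errors epsilon_i\n    C = E @ E.T                                                      # C[i, j] = epsilon_i . epsilon_j\n    Cinv = np.linalg.inv(C + ridge * np.eye(C.shape[0]))\n    return Cinv.sum(axis=1) / Cinv.sum()                             # w_i as above\n\n# Combined prediction: y_gem = gem_weights(train_preds, y_train) @ test_preds",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Mixture of Experts Models",

"sec_num": "3"

},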
|
{ |
|
"text": "Model combination ( Figure 2 ) selects top k combined predictions and adds them to the set of predictions where the next layer can use another model combination step or just pick the best model according to the results on the training set. We use a two layer combination where the second layer is a combination of all of the predictions obtained. The last layer is an arg max. We also use stacking (STACK) to build higher level models using predictions from base prediction models where they can also use the probability associated with the predictions (Ting and Witten, 1999) . The stacking models use the predictions from predictors as features and additional selected features and build second level predictors. Stacking with m predictors is depicted in Figure 3 where predictions are used as features for the predictors in the next level. Martins et al. 2017used a hybrid stacking model to combine the word-level predictions from 15 predictors using neural networks with different initializations together with the previous features from a linear model. Our stacking results also use top features from the data similar to the pass through feature of the stacking regressor of sklearn. 3 For these features, we con-r P trans GEM mix STACK sider at most the top 15% of the features selected with feature selection. RTM can achieve better results than the baseline model in Task 1 in all tasks participated 4 where the baseline is a neural predictor-estimator approach implemented in OpenKiwi (Kepler et al.) . Our training r P results are in Table 3 . Our test set results using GEM mix and MIX are in Table 4 where we obtain 5th rank among 11 submissions in the multilingual subtask according to MAE. Official evaluation metric is r P .", |
|
"cite_spans": [ |
|
{ |
|
"start": 553, |
|
"end": 576, |
|
"text": "(Ting and Witten, 1999)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1494, |
|
"end": 1509, |
|
"text": "(Kepler et al.)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 28, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 757, |
|
"end": 765, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1544, |
|
"end": 1551, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 1604, |
|
"end": 1611, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Mixture of Experts Models", |
|
"sec_num": "3" |
|
}, |
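
{

"text": "A minimal sketch of the stacking step using the scikit-learn StackingRegressor from the footnote; passthrough=True forwards the input features alongside the base predictions, approximating the pass-through of selected features described above, and the particular base learners are illustrative:\n\nfrom sklearn.ensemble import StackingRegressor, ExtraTreesRegressor, GradientBoostingRegressor\nfrom sklearn.linear_model import Ridge\nfrom sklearn.svm import SVR\n\nbase = [('ridge', Ridge(alpha=1.0)),\n        ('svr', SVR()),\n        ('xtrees', ExtraTreesRegressor(n_estimators=100)),\n        ('gbt', GradientBoostingRegressor())]\n# The second-level predictor is trained on base predictions plus passed-through features.\nstack = StackingRegressor(estimators=base, final_estimator=Ridge(), passthrough=True)\n# stack.fit(X_train, y_train); y_hat = stack.predict(X_test)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Mixture of Experts Models",

"sec_num": "3"

},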
|
{ |
|
"text": "Before model combination, we further filter prediction results from different machine learning models based on the results on the training set to decrease the number of models combined and improve the results. A criteria that we use is MREAR \u2265 0.875 since MRAER computes the mean relative RAE score, which we want to be less than 1. In general, the combined model is better than the 4 Task1:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mixture of Experts Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "https://competitions.codalab. org/competitions/24447#results,Task2: https://competitions.codalab.org/ competitions/24515#results r P MAE RMSE Task 1 en-de 0.2289 (15) 0.6319 (13) 0.7754 (13) ru-en 0.6057 (8) 0.7526 (10) 0.9917 (10) en-zh 0.1504 (15) 0.8043 (11) 1.0249 (11) et-en 0.4014 (13) 1.1209 (13) 1.3892 (13) ne-en 0.4856 (13) 0.5662 (10) 0.7688 (10) si-en 0.3720 141.1118 141.2967 14ro-en 0.5858 15 best model in the set and stacking achieves better results than MIX on the training set. However, stacking models significantly improve the results on the training data but obtain decreased scores on the test set (Table 5 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
|
{ |
|
"start": 620, |
|
"end": 628, |
|
"text": "(Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Mixture of Experts Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Referential translation machines pioneer a language independent approach and remove the need to access any task or domain specific information or resource and can achieve top performance in automatic, accurate, and language independent prediction of translation scores. We present RTM results with ensemble models and stacking.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "https://github.com/moses-smt/ mosesdecoder/tree/master/scripts", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://scikit-learn.org/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://scikit-learn.org/stable/ modules/generated/sklearn.ensemble. StackingRegressor.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The research reported here received financial support from the Scientific and Technological Research Council of Turkey (T\u00dcB\u0130TAK) and Bogazi\u00e7i University, Turkey.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "RTM results for predicting translation performance", |
|
"authors": [ |
|
{ |
|
"first": "Ergun", |
|
"middle": [], |
|
"last": "Bi\u00e7ici", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. of the Third Conf. on Machine Translation (WMT18)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "765--769", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ergun Bi\u00e7ici. 2018. RTM results for predicting transla- tion performance. In Proc. of the Third Conf. on Ma- chine Translation (WMT18), pages 765-769, Brus- sels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Machine translation with parfda, moses, kenlm, nplm, and pro", |
|
"authors": [ |
|
{ |
|
"first": "Ergun", |
|
"middle": [], |
|
"last": "Bi\u00e7ici", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. of the Fourth Conf. on Machine Translation (WMT19)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ergun Bi\u00e7ici. 2019. Machine translation with parfda, moses, kenlm, nplm, and pro. In Proc. of the Fourth Conf. on Machine Translation (WMT19), Florence, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Predicting sentence translation quality using extrinsic and language independent features. Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Ergun", |
|
"middle": [], |
|
"last": "Bi\u00e7ici", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Declan", |
|
"middle": [], |
|
"last": "Groves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Van Genabith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "171--192", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s10590-013-9138-4" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ergun Bi\u00e7ici, Declan Groves, and Josef van Genabith. 2013. Predicting sentence translation quality using extrinsic and language independent features. Ma- chine Translation, 27(3-4):171-192.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Referential translation machines for predicting semantic similarity. Language Resources and Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Ergun", |
|
"middle": [], |
|
"last": "Bi\u00e7ici", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--27", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s10579-015-9322-7" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ergun Bi\u00e7ici and Andy Way. 2015. Referential trans- lation machines for predicting semantic similarity. Language Resources and Evaluation, pages 1-27.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Pattern Recognition and Machine Learning (Information Science and Statistics)", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Bishop", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher M. Bishop. 2006. Pattern Recognition and Machine Learning (Information Science and Statis- tics). Springer-Verlag, Berlin, Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A training algorithm for optimal margin classifiers", |
|
"authors": [ |
|
{ |
|
"first": "Bernhard", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Boser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Guyon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Proceedings of the Fifth Annual Workshop on Computational Learning Theory, COLT '92", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "144--152", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/130385.130401" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernhard E. Boser, Isabelle M. Guyon, and Vladimir N. Vapnik. 1992. A training algo- rithm for optimal margin classifiers. In Proceedings of the Fifth Annual Workshop on Computational Learning Theory, COLT '92, page 144-152, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Practical selection of svm parameters and noise estimation for svm regression", |
|
"authors": [ |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Cherkassky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunqian", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Neural Networks", |
|
"volume": "17", |
|
"issue": "1", |
|
"pages": "113--126", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/S0893-6080(03)00169-2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vladimir Cherkassky and Yunqian Ma. 2004. Practical selection of svm parameters and noise estimation for svm regression. Neural Networks, 17(1):113-126.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Extremely randomized trees", |
|
"authors": [ |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Geurts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Damien", |
|
"middle": [], |
|
"last": "Ernst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis", |
|
"middle": [], |
|
"last": "Wehenkel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Machine Learning", |
|
"volume": "63", |
|
"issue": "1", |
|
"pages": "3--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pierre Geurts, Damien Ernst, and Louis Wehenkel. 2006. Extremely randomized trees. Machine Learn- ing, 63(1):3-42.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Gene selection for cancer classification using support vector machines. Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Guyon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Barnhill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "46", |
|
"issue": "", |
|
"pages": "389--422", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Isabelle Guyon, Jason Weston, Stephen Barnhill, and Vladimir Vapnik. 2002. Gene selection for cancer classification using support vector machines. Ma- chine Learning, 46(1-3):389-422.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Scalable modified Kneser-Ney language model estimation", |
|
"authors": [ |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Pouzyrevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "51st Annual Meeting of the Assoc. for Comp. Ling", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "690--696", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenneth Heafield, Ivan Pouzyrevsky, Jonathan H. Clark, and Philipp Koehn. 2013. Scalable modified Kneser-Ney language model estimation. In 51st An- nual Meeting of the Assoc. for Comp. Ling., pages 690-696, Sofia, Bulgaria.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Martins. OpenKiwi: An open source framework for quality estimation", |
|
"authors": [ |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Kepler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonay", |
|
"middle": [], |
|
"last": "Tr\u00e9nous", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Treviso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Vera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proc. of the 57th Annual Meeting of the Assoc. for Computational Linguistics: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "117--122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabio Kepler, Jonay Tr\u00e9nous, Marcos Treviso, Miguel Vera, and Andr\u00e9 F. T.\" Martins. OpenKiwi: An open source framework for quality estimation. In Proc. of the 57th Annual Meeting of the Assoc. for Computa- tional Linguistics: System Demonstrations\", month = 7, year = 2019, address = Florence, Italy, pub- lisher = Assoc. for Computational Linguistics, pages = 117-122,.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ondrej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Constantin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Herbst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "45th Annual Meeting of the Assoc. for Comp. Ling", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ondrej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In 45th Annual Meeting of the Assoc. for Comp. Ling., pages 177-180.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A weighted voting framework for classifiers ensembles", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Ludmila", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Kuncheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rodr\u00edguez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Knowledge and Information Systems", |
|
"volume": "38", |
|
"issue": "2", |
|
"pages": "259--275", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ludmila I. Kuncheva and Juan J. Rodr\u00edguez. 2014. A weighted voting framework for classifiers en- sembles. Knowledge and Information Systems, 38(2):259-275.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Pushing the limits of translation quality estimation", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Andr\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcin", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Junczys-Dowmunt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ram\u00f3n", |
|
"middle": [], |
|
"last": "Kepler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Astudillo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Hokamp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Grundkiewicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Comp. Ling", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "205--218", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andr\u00e9 F.T. Martins, Marcin Junczys-Dowmunt, Fabio N. Kepler, Ram\u00f3n Astudillo, Chris Hokamp, and Roman Grundkiewicz. 2017. Pushing the limits of translation quality estimation. Transactions of the Association for Comp. Ling., 5:205-218.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "When networks disagree: Ensemble methods for hybrid neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Perrone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Cooper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Perrone and Leon Cooper. 1992. When net- works disagree: Ensemble methods for hybrid neu- ral networks. Technical report, Brown Univ. Provi- dence RI Inst. for Brain and Neural Systems.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A Study of Translation Edit Rate with Targeted Human Annotation", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Snover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linnea", |
|
"middle": [], |
|
"last": "Micciulla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Assoc. for Machine Translation in the Americas", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and John Makhoul. 2006. A Study of Translation Edit Rate with Targeted Human An- notation. In Assoc. for Machine Translation in the Americas.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Findings of the wmt 2020 shared task on quality estimation", |
|
"authors": [ |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Blain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marina", |
|
"middle": [], |
|
"last": "Fomicheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erick", |
|
"middle": [], |
|
"last": "Fonseca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9 Ft", |
|
"middle": [], |
|
"last": "Martins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proc. of the Fifth Conf. on Machine Translation: Shared Task Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucia Specia, Fr\u00e9d\u00e9ric Blain, Marina Fomicheva, Erick Fonseca, Vishrav Chaudhary, Francisco Guzm\u00e1n, and Andr\u00e9 FT Martins. 2020. Findings of the wmt 2020 shared task on quality estimation. In Proc. of the Fifth Conf. on Machine Translation: Shared Task Papers, Online.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Issues in stacked generalization", |
|
"authors": [ |
|
{ |
|
"first": "Kai", |
|
"middle": [ |
|
"Ming" |
|
], |
|
"last": "Ting", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Witten", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "271--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai Ming Ting and Ian H. Witten. 1999. Issues in stacked generalization. Journal of Artificial Intel- ligence Research, 10:271-289.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The collinearity problem in linear regression. the partial least squares (pls) approach to generalized inverses", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Wold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ruhe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Wold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Dunn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1984, |
|
"venue": "SIAM Journal on Scientific and Statistical Computing", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "735--743", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Wold, A. Ruhe, H. Wold, and III Dunn, W. J. 1984. The collinearity problem in linear regression. the partial least squares (pls) approach to generalized in- verses. SIAM Journal on Scientific and Statistical Computing, 5:735-743.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Model combination.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Stacking use predictions as features.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"text": "Number of instances in the tasks and the size of the interpretants used.", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>: RTM test results in sentence-level MTPP in</td></tr><tr><td>tasks 1 and 2 using the best non-mix result with (ranks).</td></tr><tr><td>r P is Pearson's correlation.</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td/><td colspan=\"3\">: RTM train results in sentence-level MTPP in</td></tr><tr><td colspan=\"4\">tasks 1 and 2. r P is Pearson's correlation.</td></tr><tr><td/><td>r P</td><td>MAE</td><td>RMSE</td></tr><tr><td/><td colspan=\"3\">en-de 0.2804 (10) 0.5139 (8) 0.6762 (7)</td></tr><tr><td/><td colspan=\"3\">ru-en 0.7009 (7) 0.4957 (5) 0.6776 (5)</td></tr><tr><td>Task 1</td><td colspan=\"3\">en-zh 0.2310 (13) 0.5616 (6) 0.7298 (6) et-en 0.6051 (11) 0.5998 (8) 0.7268 (8) ne-en 0.6186 (9) 0.4990 (9) 0.6422 (8)</td></tr><tr><td/><td colspan=\"3\">si-en 0.5493 (10) 0.4909 (6) 0.6055 (6)</td></tr><tr><td/><td colspan=\"3\">ro-en 0.7367 (10) 0.4967 (7) 0.6167 (7)</td></tr><tr><td/><td colspan=\"3\">multi 0.5063 (8) 0.5249 (5) 0.6628 (6)</td></tr><tr><td>Task 2</td><td colspan=\"3\">en-de 0.2631 (15) 0.1601 (14) 0.1983 (15) en-zh 0.4029 (15) 0.1574 (14) 0.1933 (15)</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"text": "RTM test results in sentence-level MTPP in tasks 1 and 2 using the best GEM mix + mix result.", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"text": "RTM test results in sentence-level MTPP in tasks 1 and 2 using stacking.", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |