|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:59:32.515595Z" |
|
}, |
|
"title": "An Overview of Uncertainty Calibration for Text Classification and the Role of Distillation", |
|
"authors": [ |
|
{ |
|
"first": "Han", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ramakanth", |
|
"middle": [], |
|
"last": "Pasunuru", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yaniv", |
|
"middle": [], |
|
"last": "Ovadia", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Fertig", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Nado", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Sculley", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Nowozin", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Dillon", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Balaji", |
|
"middle": [], |
|
"last": "Lakshminarayanan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jasper", |
|
"middle": [ |
|
"2019" |
|
], |
|
"last": "Snoek", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Pereyra", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Tucker", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Chorowski", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"2017" |
|
], |
|
"last": "Hinton", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yada", |
|
"middle": [], |
|
"last": "Pruksachatkun", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Yeres", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Haokun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Phang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mon", |
|
"middle": [], |
|
"last": "Htut", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Tenney", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R 2020" |
|
], |
|
"last": "Bowman", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jiant", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [ |
|
"2019" |
|
], |
|
"last": "Sutskever", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Language", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [ |
|
"2016" |
|
], |
|
"last": "Liang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Siddharth", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Anca", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Dragan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Levine", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Schwarz", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yee", |
|
"middle": [], |
|
"last": "Teh", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Bapst", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Czarnecki", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Quan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Kirkpatrick", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Raia", |
|
"middle": [], |
|
"last": "Hadsell", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Recent advances in NLP systems, notably the pretraining-and-finetuning paradigm, have achieved great success in predictive accuracy. However, these systems are usually not well calibrated for uncertainty out-of-the-box. Many recalibration methods have been proposed in the literature for quantifying predictive uncertainty and calibrating model outputs, with varying degrees of complexity. In this work, we present a systematic study of a few of these methods. Focusing on the text classification task and finetuned large pretrained language models, we first show that many of the finetuned models are not well calibrated out-of-the-box, especially when the data come from out-of-domain settings. Next, we compare the effectiveness of a few widely-used recalibration methods (such as ensembles, temperature scaling). Then, we empirically illustrate a connection between distillation and calibration. We view distillation as a regularization term encouraging the student model to output uncertainties that match those of a teacher model. With this insight, we develop simple recalibration methods based on distillation with no additional inference-time cost. We show on the GLUE benchmark that our simple methods can achieve competitive out-of-domain (OOD) calibration performance w.r.t. more expensive approaches. Finally, we include ablations to understand the usefulness of components of our proposed method and examine the transferability of calibration via distillation. Volodymyr Kuleshov and Stefano Ermon. 2017. Estimating uncertainty online against an adversary. In AAAI. Volodymyr Kuleshov and Percy S Liang. 2015. Calibrated structured prediction. In NeurIPS.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Recent advances in NLP systems, notably the pretraining-and-finetuning paradigm, have achieved great success in predictive accuracy. However, these systems are usually not well calibrated for uncertainty out-of-the-box. Many recalibration methods have been proposed in the literature for quantifying predictive uncertainty and calibrating model outputs, with varying degrees of complexity. In this work, we present a systematic study of a few of these methods. Focusing on the text classification task and finetuned large pretrained language models, we first show that many of the finetuned models are not well calibrated out-of-the-box, especially when the data come from out-of-domain settings. Next, we compare the effectiveness of a few widely-used recalibration methods (such as ensembles, temperature scaling). Then, we empirically illustrate a connection between distillation and calibration. We view distillation as a regularization term encouraging the student model to output uncertainties that match those of a teacher model. With this insight, we develop simple recalibration methods based on distillation with no additional inference-time cost. We show on the GLUE benchmark that our simple methods can achieve competitive out-of-domain (OOD) calibration performance w.r.t. more expensive approaches. Finally, we include ablations to understand the usefulness of components of our proposed method and examine the transferability of calibration via distillation. Volodymyr Kuleshov and Stefano Ermon. 2017. Estimating uncertainty online against an adversary. In AAAI. Volodymyr Kuleshov and Percy S Liang. 2015. Calibrated structured prediction. In NeurIPS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The recent success of NLP systems, notably the pretraining-and-finetuning paradigm has led to widespread applications (Peters et al., 2018; Devlin et al., 2019; Radford et al., 2019) . However, these systems are not always well-calibrated; in many high-stake decision-making scenarios such as med-ical diagnosis, even small errors would have large damage. Suppose an ML system predicts a 20% probability a patient has cancer whereas the reality is 40%, diagnosis relying on inaccurate estimates could lead to devastating consequences (Kumar et al., 2019) . Further, interpreting and communicating these uncertainties facilitates better trust between humans and ML systems (Bansal et al., 2020; Wilder et al., 2020; Ribeiro et al., 2016 Ribeiro et al., , 2018 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 139, |
|
"text": "(Peters et al., 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 160, |
|
"text": "Devlin et al., 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 182, |
|
"text": "Radford et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 534, |
|
"end": 554, |
|
"text": "(Kumar et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 672, |
|
"end": 693, |
|
"text": "(Bansal et al., 2020;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 714, |
|
"text": "Wilder et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 715, |
|
"end": 735, |
|
"text": "Ribeiro et al., 2016", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 736, |
|
"end": 758, |
|
"text": "Ribeiro et al., , 2018", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Hence, it is increasingly important for users to understand not only when the systems would succeed, but also when they could fail. One seemingly straightforward approach is to have the systems output predictions and some measure of their confidence/uncertainty. Users could then use both the predictions and associated uncertainties to decide how much they would trust the prediction. For example, one might decide to take an umbrella to work only if the confidence of the rain prediction is more than 50%. For many statistical methods, confidence/uncertainty is either part of the system by design (e.g., Bayesian methods) or could be efficiently estimated (e.g., linear regressions). Unfortunately, for large-scale DNNs, estimating uncertainty becomes a challenge (Gal, 2016) : e.g., nominal probabilities from the softmax function are shown to be uncalibrated estimates of model uncertainty (Platt, 1999; Niculescu-Mizil and Caruana, 2005; Guo et al., 2017; Ovadia et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 767, |
|
"end": 778, |
|
"text": "(Gal, 2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 895, |
|
"end": 908, |
|
"text": "(Platt, 1999;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 909, |
|
"end": 943, |
|
"text": "Niculescu-Mizil and Caruana, 2005;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 944, |
|
"end": 961, |
|
"text": "Guo et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 962, |
|
"end": 982, |
|
"text": "Ovadia et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we present a systematic study on recalibrating current NLP systems, particularly those that fall in the recent popular pretraining-andfinetuning paradigm (Hendrycks et al., 2020; Desai and Durrett, 2020) , as they are widely deployed in recent state-of-the-art systems and hence it is important that they are well calibrated for safety and transparency. However, the methods discussed in this work could generalize to a broader range of systems. We focus on the calibration not only of the task itself, but also under dataset distributional shift (Ovadia et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 192, |
|
"text": "(Hendrycks et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 217, |
|
"text": "Desai and Durrett, 2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 561, |
|
"end": 582, |
|
"text": "(Ovadia et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We start by introducing uncertainty and calibration, and cover related advances in the deep learning literature. In addition to widely-used maximum calibration error and expected calibration error, we follow previous works (Ovadia et al., 2019; Kumar et al., 2019) and include additional calibration evaluation metrics for better comparisons (e.g., Brier scores and p calibration error).", |
|
"cite_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 244, |
|
"text": "(Ovadia et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 245, |
|
"end": 264, |
|
"text": "Kumar et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We conduct experiments on GLUE classification tasks (Wang et al., 2019) and show that finetuned language models are usually not calibrated out-of-the-box, especially when the data comes from a distribution different from the training data. We use the term \"out-of-domain\" (or \"out-ofdistribution\", OOD) to refer to the setting where the train and evaluation data come from different \"distributions\". Related works in NLP have considered data from similar tasks but from different datasets as OOD (Ovadia et al., 2019; Hendrycks and Gimpel, 2017) . Next, in order to make models more calibrated, we study some of the widelyused recalibration methods, with various degrees of effectiveness and computational cost. For example, ensembling models has been shown to be very effective in out-of-domain settings (Ovadia et al., 2019) , but the cost of computation scales with the size of ensembles. On the other hand, distillation (Hinton et al., 2015 ) is a widely-known method for improving the system's performance by learning from a stronger teacher model. In this work, we empirically examine the connection between distillation and calibration. Notably, we view the objective function of distillation as a regularization term that encourages the student model to match the predictive uncertainty of a stronger, more calibrated teacher model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 71, |
|
"text": "(Wang et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 492, |
|
"end": 517, |
|
"text": "OOD (Ovadia et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 518, |
|
"end": 545, |
|
"text": "Hendrycks and Gimpel, 2017)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 805, |
|
"end": 826, |
|
"text": "(Ovadia et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 924, |
|
"end": 944, |
|
"text": "(Hinton et al., 2015", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We conduct analysis experiments to show that the teacher's calibration performance could be distilled into the student model, even when the teacher model's accuracy remains similar. With this insight, we show that simple methods based on distillation could achieve competitive performance in out-ofdomain calibration, without introducing extra computation at inference time. Finally, we also conduct ablation experiments to understand the usefulness of components of the method. In summary, our contributions are listed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We present a systematic study on the performance of various recalibration methods on finetuned language models for both in-domain and out-of-domain settings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We empirically examine the connection between distillation and calibration, and conduct experiments showing that distillation can distill calibration performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We describe two simple recalibration methods, and experimental results demonstrate their competitiveness in the out-of-domain settings; finally, we also ablate method's components and measure the extent to which distillation transfers teachers' calibration improvement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Due to space constraints, we present some of the most relevant materials in the main paper. Please see the appendix (Sec. A) for extended background and related works. The quality of the uncertainty measurement is usually measured via calibration (Kendall and Gal, 2017) . In the context of calibration, the uncertainties often refer to predictive probabilities. The model is calibrated if the predictive probabilities match the empirical frequency of the data (Gal, 2016) . Let\u0176 andP be the predicted class and its associated confidence of a neural network. We would like the confidence estimatesP to be calibrated, which intuitively means that we wantP to represent true probabilities (Guo et al., 2017) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 270, |
|
"text": "(Kendall and Gal, 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 472, |
|
"text": "(Gal, 2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 687, |
|
"end": 705, |
|
"text": "(Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P(\u0176 = Y |P = p) = p, \u2200p \u2208 [0, 1].", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Background and Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Suppose a classification model is given N input examples, and made predictions\u0177 1 , ...,\u0177 N , each withp = 0.35. We would expect 35% of the predictions would be correct. The problem of uncertainty/confidence calibration and confidence scores have been studied and applied in various settings such as structured prediction problems (Kuleshov and Liang, 2015), online recalibration (with potentially adversarial/OOD input) (Kuleshov and Ermon, 2017), model regularization (Pereyra et al., 2017) , and misclassified/OOD examples detection (Hendrycks and Gimpel, 2017) . In practice, however, perfect calibration is almost impossible (Guo et al., 2017) , and estimating the first term in Eq. 1 is not straightforward using finite samples, because in most casesP is a continuous random variable (Guo et al., 2017; Kumar et al., 2019) . In Sec. 3, we describe ways to estimate the calibration performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 470, |
|
"end": 492, |
|
"text": "(Pereyra et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 536, |
|
"end": 564, |
|
"text": "(Hendrycks and Gimpel, 2017)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 630, |
|
"end": 648, |
|
"text": "(Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 790, |
|
"end": 808, |
|
"text": "(Guo et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 809, |
|
"end": 828, |
|
"text": "Kumar et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "It has been widely observed that modern neural networks are usually not calibrated out of the box (Platt, 1999; Zadrozny and Elkan, 2001; Guo et al., 2017; Ovadia et al., 2019) . Recalibration methods improve calibration by transforming un-calibrated outputs into calibrated outputs/probabilities, and they include scaling-based methods (Platt, 1999; Guo et al., 2017) , histogrambinning-based methods (Guo et al., 2017; Zadrozny and Elkan, 2001) , and ensembles (Lakshminarayanan et al., 2017) . Recently, Kumar et al. (2019) proposed the scaling-binning calibrator and a more sample-efficient estimator of calibration error. In our work, we describe simple approaches that combine the strength of ensembles and temperature scaling without introducing computation at inference time; we further apply the scaling-binning calibrator to ensure calibration.", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 111, |
|
"text": "(Platt, 1999;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 112, |
|
"end": 137, |
|
"text": "Zadrozny and Elkan, 2001;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 155, |
|
"text": "Guo et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 176, |
|
"text": "Ovadia et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 350, |
|
"text": "(Platt, 1999;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 368, |
|
"text": "Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 420, |
|
"text": "(Guo et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 446, |
|
"text": "Zadrozny and Elkan, 2001)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 463, |
|
"end": 494, |
|
"text": "(Lakshminarayanan et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 497, |
|
"end": 526, |
|
"text": "Recently, Kumar et al. (2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Ensemble-based methods work by aggregating multiple networks trained independently on the entire dataset, and has been shown to achieve strong performance in out-of-domain calibration (Ovadia et al., 2019; Lakshminarayanan et al., 2017) . More generally, there are randomization-based ensembles and boosting-based ensembles. Within the randomization-based ensembles, we use the entire training dataset to train each model instead of different bootstrap samples of the original training set (Lakshminarayanan et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 205, |
|
"text": "(Ovadia et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 236, |
|
"text": "Lakshminarayanan et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 521, |
|
"text": "(Lakshminarayanan et al., 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Temperature scaling is an extension of Platt scaling (Guo et al., 2017) . It uses a single scalar parameter T > 0 for all classes. Given output z i , the confidence prediction is:", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 71, |
|
"text": "(Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p i = max k \u03c3(z i,k /T ).", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Background and Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "An extension, called heteroscedastic regression, is used in our work, which replaces the constant scalar with learned values (Kendall and Gal, 2017; Kendall et al., 2018) . Knowledge distillation (Hinton et al., 2015 ) is a compression technique in which a compact model (usually referred to as the student model) is trained to mimic the behavior of a more powerful teacher model. In the context of classification, knowledge distillation works by augmenting the loss function with an additional term D KL (p i p j ) where p i = softmax(z i /T ) and p j = softmax(z j /T ) with z i and z j the logits from two models, and T controls the smoothness of the output distribution. In this work, we show that distillation can also be used to distill calibration performance, and use it to build simple yet competitive recalibration methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 148, |
|
"text": "(Kendall and Gal, 2017;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 170, |
|
"text": "Kendall et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 196, |
|
"end": 216, |
|
"text": "(Hinton et al., 2015", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Concurrently, Desai and Durrett (2020) studied the calibration of pretrained transformers when finetuned to downstream tasks, and Hendrycks et al. (2020) studied the out-of-distribution robustness of pretrained transformers. We are different from them in that first we present a systematic study on the out-of-distribution calibration; second we draw insights from the connection between distillation and temperature scaling to design simple yet competitive recalibration methods; third, we conduct experiments to understand the connection between them empirically; finally, we also include a more comprehensive set of calibration evaluations following Ovadia et al. 2019and Kumar et al. (2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 38, |
|
"text": "Desai and Durrett (2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 153, |
|
"text": "Hendrycks et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 675, |
|
"end": 694, |
|
"text": "Kumar et al. (2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Let X be the input space, and Y = {1, ..., K} be the label space, and X \u2208 X and Y \u2208 Y be random variables denoting the input and the label, respectively. Further, let f : X \u2192 [0, 1] K be a neural network that outputs the model's confidence for each class. For simplicity of notation, we defin\u00ea", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Y = arg max j f (X) j , andP = max j f (X) j .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Expected Calibration Error. One notion of miscalibration is the expected difference between confidence and accuracy,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "ECE(f ) = E P Y =\u0176 |P =P \u2212P . (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As mentioned in Sec. 2, this cannot be estimated using finitely many samples ifP is a continuous random variable. Expected Calibration Error (Naeini et al., 2015; Guo et al., 2017) , or ECE, approximates this via partitioning predictions into multiple bins and computing the weighted average. Maximum Calibration Error. In high-risk scenarios, we might be interested in measuring the worst-case performance. Maximum Calibration Error (Naeini et al., 2015; Guo et al., 2017) , or MCE, estimates the following quantity via binning,", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 162, |
|
"text": "(Naeini et al., 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 180, |
|
"text": "Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 428, |
|
"end": 455, |
|
"text": "Error (Naeini et al., 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 456, |
|
"end": 473, |
|
"text": "Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "MCE(f ) = max P \u0176 =Y |P =P \u2212P . (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Brier Score. Calibration alone is not sufficient. We could construct cases in which the outputs of the model are calibrated but not useful. An example includes always outputting 50% in a binary classification task containing 50% of both labels (Kumar et al., 2019) . An alternative measure is the Brier score (Brier, 1950) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 264, |
|
"text": "(Kumar et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 322, |
|
"text": "(Brier, 1950)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": ", E[(f(X) \u2212 Y)^2].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Note that the Brier Score is a proper scoring rule, thus the optimum score corresponds to a system with perfect calibration. We refer a more detailed discussion on proper scoring rule to Lakshminarayanan et al. (2017) (Sec 2.2). An extension of Brier Score is Brier Skill Scores (BSS). BSS is favored when the classes are imbalanced. In our early experiments, we did not observe significant ranking changes between these two measures, so we report Brier Score for simplicity. 1 \u2113_p Calibration Error. A generalized notion of the calibration error is described in Kumar et al. 2019,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "CE(f) = (E[|P(Y = \u0176 | P\u0302) \u2212 P\u0302|^p])^{1/p}. (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "This recovers the MCE when p = \u221e and ECE when p = 1 (Kumar et al., 2019). When p = 2, we refer to it as Squared Calibration Error (SCE). 2 This is estimated via binning the outputs and labels in practice similar to ECE and MCE. The plugin estimate for each term in the calibration error has been shown to be a biased estimate in Kumar et al. (2019) , and the authors encouraged the use of a debiased estimator for the calibration error. We refer to this as the debiased Squared Calibration Error.", |
|
"cite_spans": [ |
|
{ |
|
"start": 329, |
|
"end": 348, |
|
"text": "Kumar et al. (2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Calibration Error Metrics", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As noted in Sec. 2, the key to estimating the calibration error is estimating the conditional expectation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Underestimation of Calibration Errors for Model with Continuous Outputs", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "E[Y |f (X)]. However, if f (X) is continuous, without smoothness assumptions on E[Y |f (X)]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Underestimation of Calibration Errors for Model with Continuous Outputs", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": ", this is impossible (Kumar et al., 2019) . An approximation could be made via binning the outputs into B intervals, as is done in most of the metrics aforementioned. However, Kumar et al. (2019) showed that the binned version always has a lower calibration error. The authors introduced the scaling-binning calibrator, which first fits a parametric function and then bins the function values to ensure calibration. Thus, in addition to reporting results using the metrics described in Sec. 3.1, we report results by running the scaling-binning calibrator on top of each method that we considered. 3 We further include ECE results with multiple bin-values in order to reduce the gap.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 41, |
|
"text": "(Kumar et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 195, |
|
"text": "Kumar et al. (2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 598, |
|
"end": 599, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Underestimation of Calibration Errors for Model with Continuous Outputs", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Our baseline model follows the general finetuning of large pretrained language models on downstream tasks: we finetune RoBERTa-base (Liu et al., 2019) on downstream tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Model", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Despite the strong empirical performance of many calibration methods (e.g., ensembles), their usefulness in practice is limited due to increased computation and/or memory costs at inference time (Ovadia et al., 2019). In Sec. 4.3, we describe a simple baseline: recalibrate, ensemble, and distill. Distillation has been shown to mostly \"preserve\" performance in terms of accuracy -stronger teacher models tend to translate to stronger students (Hinton et al., 2015) . However, whether distillation could also \"preserve\" calibration performance is less studied. A model with better performance does not necessarily translate to better calibration (Guo et al., 2017) . Here, we briefly look at the distillation's objective from an angle of uncertainty matching, and show that they are related intuitively. Sec. 6.1 provides empirical evidence showing that the teacher model's calibration performance could be distilled into the student model. There are two ways to see the connection. First, note that distillation tries to minimize the KLdivergence between the teacher output distribution and the student output distribution. This intuitively regularizes the student model to output confidence values that would be close to the confidence values from the teacher model. Later in Sec. 6.1, experimental results show that the confidences from two models indeed correlate positively. Another perspective, which we elaborate below, considers distillation as encouraging the students to output uncertainty close to that of teacher models. Figure: Visualization of calibration performance, measured by SCEs (debiased), between teacher and student models, trained on RTE and evaluated on QNLI. The n in the legend refers to the size of ensemble(s). One metric/task, emphasizing different ensemble sizes. The Other Three Figures: These are zoomed-out versions of the left-most figure, along with other tasks. \nInstead of using color to imply the ensemble size, here the color refers to the task in which the models are evaluated, and points of different ensemble sizes but the same evaluation task are aggregated and represented by the same color. Each sub-figure represents the evaluation metric. More tasks/metrics, less emphasis on ensemble sizes. All Figures: The X-axis refers to the teacher model performance, and the Y-axis refers to the student model performance. Each dot represents a different configuration used in the teacher model. The P/S in the legends refer to the Pearson/Spearman correlations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 444, |
|
"end": 465, |
|
"text": "(Hinton et al., 2015)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 664, |
|
"text": "(Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1533, |
|
"end": 1540, |
|
"text": "Figure:", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Distillation and Uncertainty", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We start by defining a loss function as a weighted combination of the regular cross entropy loss function and a regularization term that measures the difference in the uncertainty between the student model, \u03b8, and the teacher model, \u03b8 ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distillation and Uncertainty", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "L(\u03b8) = (1 \u2212 \u03b1)L_XE(\u03b8) + \u03b1|H(\u03b8) \u2212 H(\u03b8')|, (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distillation and Uncertainty", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where H refers to predictive entropy (Gal, 2016) , and is defined as (\u03b8 is ignored for simplicity), Gal (2016) showed that H(y|x, D) could be approximated using samples from the (approximate) posterior distribution of the parameters. In practice, this could be satisfied, for example, if the student model is trained using dropout, and the teacher model uses either MC-dropout or ensembles. 4 Next, suppose we approximate one of the predictive entropy terms using cross entropy. This turns the second term in Eq. 6 into KL-divergence, and hence recovers the distillation objective. 5", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 48, |
|
"text": "(Gal, 2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 100, |
|
"end": 110, |
|
"text": "Gal (2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 392, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distillation and Uncertainty", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "H(y|x, D) = \u2212\u03a3_c p(y=c|x, D) log p(y=c|x, D). (7)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distillation and Uncertainty", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "This simple algebraic manipulation shows that distillation has the effect of encouraging the student model to match the teacher model's uncertainty, and motivates us to build a simple recalibration method \"recalibrate, ensemble, and distill\" by first building an expensive yet calibrated teacher model (an ensemble of models each of which is recalibrated using temperature scaling), 6 and then distilling the expensive teacher model into a cheaper student model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 384, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recalibrate, Ensemble, and Distill", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The training cost is roughly (N + 1)C 0 + C 1 , where N is the ensemble size, C 0 the cost of training the baseline, +1 comes from distillation, and C 1 comes from training the temperature scaling model (which is relatively cheap). However, the inference cost is almost the same as a single model (i.e., small overhead), which is very useful when inference is the primary concern (e.g., deployment).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recalibrate, Ensemble, and Distill", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The distillation term is often written as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Choosing the Distillation Temperature", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "D KL P (x; \u03b8 , T ) || P (x; \u03b8, T ) ,", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Choosing the Distillation Temperature", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "where P (x; \u03b8, T ) = softmax(f (x; \u03b8)/T ) and T is usually a hyperparameter to be tuned. One might notice that this is similar to the equation of temperature scaling (Eq. 2). This, together with the uncertainty matching viewpoint, motivates a small change to the distillation: we can remove the T from the student, and choose the constantT for the teacher that minimizes the calibration error,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Choosing the Distillation Temperature", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "D KL P (x; \u03b8 , arg min T CE(\u03b8 ,T )) || P (x; \u03b8) ,", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Choosing the Distillation Temperature", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "which is similar to performing another temperature scaling. The motivation is that we want the student model to produce calibrated probabilities rather than the scaled version of the student. If we simultaneously scale the student by T , then f (x; \u03b8)/T would be calibrated, but the student model itself would not. We want to emphasize here that we are not the first ones to describe the connection between distillation and calibration, related findings have been presented in previous works (Tang et al., 2020; M\u00fcller et al., 2019) . However, we believe our view from the angle of predictive entropy is novel. More importantly, we conduct extensive experiments and analyses in the context of finetuned language models for several text classification tasks, to empirically verify that calibration performance between student and teacher model is correlated.", |
|
"cite_spans": [ |
|
{ |
|
"start": 492, |
|
"end": 511, |
|
"text": "(Tang et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 512, |
|
"end": 532, |
|
"text": "M\u00fcller et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Choosing the Distillation Temperature", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We include additional details in the supplementary materials. Also included are expanded experiment results, such as figures evaluated on more tasks using more evaluation metrics (Sec. 6.1), and detailed/expanded results tables as well as accuracy and ECEs with multiple bin-sizes (Sec. 6.2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Model. Our codebase is largely based on Hug-gingFace Transformers (Wolf et al., 2019). When applicable, we use an ensemble size 2, and chooseT (Eq. 9) based on the Brier Scores on the validation dataset. The baseline model has 125.2M parameters, the temperature-scaling model (heteroscedastic variant) has 125.8M, and our method has 125.2M (same as the baseline model).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Data. We perform experiments on the classification tasks from the GLUE Benchmark (Wang et al., 2019), and we refer readers to Wang et al. (2019) regarding dataset statistics. Because the calculation of calibration errors requires access to the ground truth data, which is not available for GLUE data, we split the validation dataset into two halves, one for validation and the other for test, following Desai and Durrett (2020) . For MultiNLI, we merge the results for both MultiNLI matched and mismatched sections. When computing the out-of-domain performance between the 3-label MultiNLI and other 2-label NLI tasks, we follow jiant (Pruksachatkun et al., 2020) and merge the predictions/labels that correspond to \"neutral\" and \"contradiction\" into a single category.", |
|
"cite_spans": [ |
|
{ |
|
"start": 403, |
|
"end": 427, |
|
"text": "Desai and Durrett (2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Evaluation. Our evaluation follows Guo et al. (2017) , Ovadia et al. 2019, and Kumar et al. (2019). The train and evaluation data come from the same task for in-domain evaluations, but they come from different tasks of the same type for out-of-domain evaluations. We group MRPC and QQP (paraphrase tasks), and group MNLI (2-label version), QNLI, RTE, and WNLI (NLI tasks). We leave SST-2 (sentiment), CoLA (acceptability), and MNLI (3-label version, NLI) as separate groups. We use the in-domain validation data to train the scaling-binning calibrator. 7", |
|
"cite_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 52, |
|
"text": "Guo et al. (2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Analysis Experiments Details. We conduct experiments on RTE, in which we distill teacher models with different ensemble-sizes (from 1 to 6) and the temperature scaling constant (from 0.50 to 2.00 with a step size of 0.02) to student models. Each model is then evaluated on both in-domain task (RTE) and out-of-domain tasks (MNLI-2, QNLI, WNLI) using confidence, ECE, MCE, Brier Scores, SCE (debiased) and SCE (biased). The numbers represent performances on the validation dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Sec. 4.2 shows the connection between distillation and uncertainty regularization. In this section, we perform analysis experiments examining the correlation between the calibration performance of the teacher models and student models. We conduct experiments on RTE, in which we distill teacher models with different ensemble-sizes and the temperature scaling constant to student models. Each model is then evaluated on both in-domain and out-of-domain tasks. Numbers here represent performances on the validation dataset. We start by examining the calibration performances of teacher and student models, where we vary the calibration performance of the teacher model while holding the accuracy almost the same. 8 Fig. 1 (left) shows the debiased Squared Calibration Error of models trained on RTE and 7 We only use the 2-label version of MNLI for evaluation. We use accuracy for CoLA evaluation so that calibration error computations would be more consistent across tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 802, |
|
"end": 803, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 714, |
|
"end": 727, |
|
"text": "Fig. 1 (left)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments 6.1 Analysis Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "8 Note the accuracy of teacher models with the same ensemble size but different temperature scaling constants would be almost the same, as for each model, temperature scaling constant sharpens/flattens the probabilities but usually does not change their relative ranking. The motivation here is to reduce external influences, as comparing calibration performance might not be very meaningful if the predictions/accuracies change significantly. evaluated on QNLI. We can observe that, by varying the teacher model's calibration performance, the calibration performance of the student model also changes in similar directions. Next, Fig. 1 (right) depicts the calibration performances of each teacher-student pair across multiple calibration metrics. Similarly, these figures indicate that correlation of calibration performance between teacher/student models are in general positive. This confirms the intuition described in Sec. 4.2 that calibration performance of the teacher model could be distilled into the student model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 631, |
|
"end": 637, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments 6.1 Analysis Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Next, we show our experimental results comparing the following four models: Baseline (Baseline, Sec. 4.1), Ensemble (Lakshminarayanan et al., 2017) (Ensemble, Sec. 2), Temperature Scaling (Guo et al., 2017 ) (TempScale, Sec. 2), our method (Ours, Sec. 4.2), and its variant with automatic distillation temperature selection (Ours (T ), Sec. 4.4). For each table, we report results with and without running the scaling-binning calibrator following the description in Sec. 3.2. Due to space constraints, we discuss and display the average performances in here (please see Sec. 5).", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 205, |
|
"text": "(Guo et al., 2017", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Main Experiments", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Baseline Performances. Results are shown in Table 1 ; here, we can see that the baseline has relatively high calibration errors. Notably, the outof-domain ECE values are around 18\u221219, interpreted as over/under-estimating the probability by about 18\u221219% in expectation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 51, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main Experiments", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Ensemble and Temperature Scaling. Next, we add ensembles/temperature scaling to the baseline. Results in Table 1 show that performances improve in general, especially in the out-of-domain settings: 3/9 in-domain metrics improve (2/9 metrics similar) and 8/9 out-of-domain metrics improve for ensembles, 6/9 in-domain metrics improve (2/9 metrics similar) and 7/9 out-of-domain metrics improve (1/9 metrics similar) for temperature-scaling. The results are largely consistent with previous observations that temperature-scaling performed better when the data come from in-domain (it outperforms ensembles among 7/9 metrics and 1/9 similar in in-domain settings), whereas ensembles are more competitive in out-of-domain settings at the cost of extra computation (it out-performs temperature scaling in 4/9 metrics in out-of-domain settings while being similar in 3/9).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 112, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main Experiments", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Our Methods. Then, we apply our method, which has the same computation at inference time as the baseline. Table 1 showed that performances improve as well despite having no extra inferencetime computation cost: 2/9 metrics improve (3/9 metrics similar) in-domain and 9/9 metrics improve out-of-domain. Applying the automatic temperature selection on top of our method further improves out-of-domain performance in 4 metrics. However, using automatic temperature does not further improve the performance when we additionally apply the scaling-binning calibrator. We hypothesize that this is because temperature values are chosen based on evaluation metrics before applying the scaling-binning calibrator, thus fail to take it into account. Also, comparing our method to ensembles and temperature scaling, our method improves upon temperature scaling in 5/9 metrics in out-of-domain settings (1/9 similar), but outperforms the more expensive ensembles in just 3/9 metrics (1/9 similar). Comparing our method with automatic temperature selection, we can see 8/9 metrics in out-of-domain settings improves compared to temperature scaling, and 5/9 compared to ensembles (1/9 similar). This shows that our methods are competitive in out-of-domain settings with little extra computation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 113, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main Experiments", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In this section, we (1) ablate our method by removing components to gain insights into how each of the components contribute to the final performance, 9 and (2) measure how well distillation transfers calibration performance. First, we remove ensembles (or temperature scaling), and include only temperature scaling (or ensembles) and distillation (\u2212Ensembles and \u2212TempScale, respectively). We can see from the results in Table 2 that removing either of them leads to worse performances in general: 7/9 in-domain (2/9 being similar) and 6/9 (2/9 being similar) outof-domain for removing ensembles, 4/9 in-domain (4/9 similar) and 9/9 out-of-domain for removing temperature scaling. This shows that the additional calibration gains from the teacher model can be effectively distilled into the student models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 422, |
|
"end": 429, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Experiments", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Next, we compare the models before/after distillation (\u2212Distillation). 10 As expected, the teacher model (before distillation) achieved strong performance at the expense of extra inference-time computation. We then study to what extent distillation transfers calibration performance. Let A t and B t be two different teacher models (before distillation) with difference in only one of the components (e.g., ensemble or temperature-scaling), and let A s and B s be the corresponding student models (after distillation). Then, we compute the relative percentage of improvement because of a component from teacher to student model (assuming A is more powerful than B), denoted as \u03c1 AB :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Experiments", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03c1 AB = \u03b5(A s ) \u2212 \u03b5(B s ) \u03b5(A t ) \u2212 \u03b5(B t ) \u00d7 100,", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Ablation Experiments", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "where \u03b5(\u2022) denotes the out-of-domain calibration performance. We compute \u03c1 AB for each metric, and use the median of percentages as the summary statistic. We found 40.8% (111.2%) of the improvements from adding ensembles (temperature scaling) as extra components in teacher models are transferred to students models via distillation. 11", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Experiments", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "We presented a study of calibration of finetuned language models in the context of text classification, where models are evaluated on in-domain and out-of-domain data. We showed the effectiveness of a few widely-used calibration methods. We illustrated the intuitive connection between distillation and calibration, and described simple yet competitive calibration methods. We conducted experiments to empirically understand whether distillation can be used to distill calibration performance, and showed that the simple methods we described achieved competitive out-of-domain calibration performances. We further presented ablation studies on the usefulness of components of the proposed method and examined the transferability of calibration via distillation. However, our method is limited in that it requires an overhead cost involved in training the student model, which could be expensive in some settings. We leave it to future works to investigate more efficient inferencetime recalibration techniques. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Two types of uncertainty commonly appear in machine learning literature: epistemic uncertainty and aleatoric uncertainty (Gal, 2016; Kendall and Gal, 2017) . Epistemic uncertainty accounts for uncertainty in model parameters, and tends to decrease as the amount of observed data increases. Aleatoric uncertainty conveys the noise inherent in the observations, and thus cannot be explained away with an increasing amount of data available. In the case of classification, examples of aleatoric uncertainty include the probability of the top class, 12 and the entropy of the probability distribution over classes (Kendall et al., 2018) ; examples of epistemic uncertainties include the mutual information. 13 In the literature of uncertainty calibration, we usually calibrate aleatoric uncertainty measured by the probability of the prediction. In Sec. 4.2, we also view distillation from the angle of matching another uncertainty between teacher model and student model, the predictive entropy (Gal, 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 132, |
|
"text": "(Gal, 2016;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 133, |
|
"end": 155, |
|
"text": "Kendall and Gal, 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 610, |
|
"end": 632, |
|
"text": "(Kendall et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 703, |
|
"end": 705, |
|
"text": "13", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 992, |
|
"end": 1003, |
|
"text": "(Gal, 2016)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Epistemic and Aleatoric Uncertainty", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The quality of the uncertainty measurement is usually measured via calibration (Kendall and Gal, 2017) . In the context of calibration, the uncertainties often refer to predictive probabilities. The model is calibrated if the predictive probabilities match the empirical frequency of the data (Gal, 12 More specifically, it is one minus the probability/confidence of the top class.", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 102, |
|
"text": "(Kendall and Gal, 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 293, |
|
"end": 298, |
|
"text": "(Gal,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 301, |
|
"text": "12", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Uncertainty Calibration", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "13 Please see page 54 in Gal (2016) for details. 2016). Let\u0176 andP be the predicted class and its associated confidence (probability of correctness) of a neural network. We would like the confidence estimatesP to be calibrated, which intuitively means that we wantP to represent true probabilities (Guo et al., 2017) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 35, |
|
"text": "Gal (2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 297, |
|
"end": 315, |
|
"text": "(Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Uncertainty Calibration", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P(\u0176 = Y |P = p) = p, \u2200p \u2208 [0, 1].", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "A.2 Uncertainty Calibration", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Suppose a classification model is given N input examples, and made predictions\u0177 1 , ...,\u0177 N , each withp = 0.35. We would expect 35% of the predictions would be correct. The problem of uncertainty/confidence calibration and confidence scores have been studied and applied in various settings (Kuleshov and Liang, 2015; Kuleshov and Ermon, 2017; Pereyra et al., 2017; Hendrycks and Gimpel, 2017; Elsahar and Gall\u00e9, 2019; Reddy et al., 2019) . In practice, however, perfect calibration is almost impossible (Guo et al., 2017) , and estimating the first term in Eq. 11 is not straightforward using finite samples, because in most casesP is a continuous random variable (Guo et al., 2017; Kumar et al., 2019) . In Sec. 3, we describe ways to estimate the calibration performance. It has been widely observed that modern neural networks are usually not calibrated out of the box (Platt, 1999; Zadrozny and Elkan, 2001; Guo et al., 2017; Ovadia et al., 2019) . Recalibration methods improve calibration by transforming un-calibrated outputs into calibrated outputs/probabilities, and they include scaling-based methods (Platt, 1999; Guo et al., 2017) , histogrambinning-based methods (Guo et al., 2017; Zadrozny and Elkan, 2001) , and ensembles (Lakshminarayanan et al., 2017) . Recently, Kumar et al. (2019) proposed the scaling-binning calibrator and a more sample-efficient estimator of calibration error. In our work, we describe simple approaches that combines the strength of ensembles and temperature scaling without introducing computation at inference time; we further apply the scalingbinning calibrator to ensure calibration.", |
|
"cite_spans": [ |
|
{ |
|
"start": 292, |
|
"end": 318, |
|
"text": "(Kuleshov and Liang, 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 344, |
|
"text": "Kuleshov and Ermon, 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 366, |
|
"text": "Pereyra et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 394, |
|
"text": "Hendrycks and Gimpel, 2017;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 395, |
|
"end": 419, |
|
"text": "Elsahar and Gall\u00e9, 2019;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 420, |
|
"end": 439, |
|
"text": "Reddy et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 523, |
|
"text": "(Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 666, |
|
"end": 684, |
|
"text": "(Guo et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 685, |
|
"end": 704, |
|
"text": "Kumar et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 874, |
|
"end": 887, |
|
"text": "(Platt, 1999;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 888, |
|
"end": 913, |
|
"text": "Zadrozny and Elkan, 2001;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 914, |
|
"end": 931, |
|
"text": "Guo et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 932, |
|
"end": 952, |
|
"text": "Ovadia et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1113, |
|
"end": 1126, |
|
"text": "(Platt, 1999;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1127, |
|
"end": 1144, |
|
"text": "Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1178, |
|
"end": 1196, |
|
"text": "(Guo et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1197, |
|
"end": 1222, |
|
"text": "Zadrozny and Elkan, 2001)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1239, |
|
"end": 1270, |
|
"text": "(Lakshminarayanan et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1273, |
|
"end": 1302, |
|
"text": "Recently, Kumar et al. (2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Uncertainty Calibration", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Ensembles work by aggregating multiple networks trained independently on the entire dataset, and has been shown to achieve strong performance in out-of-domain calibration (Ovadia et al., 2019; Lakshminarayanan et al., 2017) . 14 Temperature scaling is an extension of Platt scaling (Guo et al., 2017) . It uses a single scalar parameter T > 0 for all classes. Given output z i (usually logits vectors), the confidence prediction is:", |
|
"cite_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 192, |
|
"text": "(Ovadia et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 223, |
|
"text": "Lakshminarayanan et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 282, |
|
"end": 300, |
|
"text": "(Guo et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Uncertainty Calibration", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p i = max k \u03c3(z i,k /T ).", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "A.2 Uncertainty Calibration", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "An extension, called heteroscedastic regression, is used in our work, which replaces the constant scalar with learned values (Kendall and Gal, 2017; Kendall et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 148, |
|
"text": "(Kendall and Gal, 2017;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 170, |
|
"text": "Kendall et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Uncertainty Calibration", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Knowledge distillation (Hinton et al., 2015; Domingos, 1997; Blum and Mitchell, 1998; Zeng and Martinez, 2000; Ba and Caruana, 2014 ) is a compression technique in which a compact model (usually referred to as the student model) is trained to mimic the behavior of a more powerful teacher model. In the context of classification, knowledge distillation works by augmenting the loss function with an additional term D KL (p i p j ) where p i = softmax(z i /T ) and p j = softmax(z j /T ) with z i and z j the logits from two models, and T controls the smoothness of the output distribution. Knowledge distillation has been used in a wide range of applications (Bucilu\u01ce et al., 2006; Wang et al., 2018; Kim and Rush, 2016; Furlanello et al., 2018; Clark et al., 2019; Teh et al., 2017; Schwarz et al., 2018; Sanh et al., 2019) . In this work, we show that distillation can also be used to distill calibration performance, and use it to build simple yet competitive recalibration methods. A related area of research is label smoothing (Yuan et al., 2020) . Label smoothing replaces the hard/one-hot targets y k with modified targets y k (1\u2212\u03b1)+\u03b1/K, where K is the number of classes and \u03b1 is a hyper-parameter. Pereyra et al. 2017showed that label smoothing provides consistent gains across many tasks and proposed a new regularizer, termed confidence penalty. M\u00fcller et al. (2019) studied when label smoothing is helpful , and found that label smoothing can implicitly calibrate model's predictions. Instead, our use of a teacher model can be seen as adaptively deciding how much smoothing is needed (Tang et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 44, |
|
"text": "(Hinton et al., 2015;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 45, |
|
"end": 60, |
|
"text": "Domingos, 1997;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 61, |
|
"end": 85, |
|
"text": "Blum and Mitchell, 1998;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 86, |
|
"end": 110, |
|
"text": "Zeng and Martinez, 2000;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 111, |
|
"end": 131, |
|
"text": "Ba and Caruana, 2014", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 659, |
|
"end": 681, |
|
"text": "(Bucilu\u01ce et al., 2006;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 682, |
|
"end": 700, |
|
"text": "Wang et al., 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 701, |
|
"end": 720, |
|
"text": "Kim and Rush, 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 721, |
|
"end": 745, |
|
"text": "Furlanello et al., 2018;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 746, |
|
"end": 765, |
|
"text": "Clark et al., 2019;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 766, |
|
"end": 783, |
|
"text": "Teh et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 784, |
|
"end": 805, |
|
"text": "Schwarz et al., 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 806, |
|
"end": 824, |
|
"text": "Sanh et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1032, |
|
"end": 1051, |
|
"text": "(Yuan et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1596, |
|
"end": 1615, |
|
"text": "(Tang et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.3 Distillation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finally, there are also a few recent related works in the computer vision literature, e.g., Yun et al. (2020) proposed to distill the predictive distribution between different samples of the same label during training to improve calibration performance, Gurau et al. (2018) proposed Distilled Dropout Network which distills knowledge from multiple MC samples from the teacher to improve the reliability of its uncertainty scores. In our work, we mainly focus on language tasks. Concurrent to our work, Desai and Durrett (2020) studied the calibration of pretrained transformers when finetuned to downstream tasks, and Hendrycks et al. (2020) studied the outof-distribution robustness of pretrained transformers. We are different from these two works in that first we present a systematic study on the out-ofdistribution calibration; second we draw insights from the connection between distillation and temperature scaling to design simple yet competitive recalibration methods; third, we conduct experiments to understand the connection between these two concepts empirically; finally, we also include a more comprehensive set of calibration evaluations following Ovadia et al. 2019and Kumar et al. (2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 109, |
|
"text": "Yun et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 273, |
|
"text": "Gurau et al. (2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 526, |
|
"text": "Desai and Durrett (2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 618, |
|
"end": 641, |
|
"text": "Hendrycks et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1186, |
|
"end": 1205, |
|
"text": "Kumar et al. (2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.4 Recent Related Works", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Model Details and Hyperparameter Search. Our codebase is largely based on the Transformers library from HuggingFace (Wolf et al., 2019). 15 We used RoBERTa-base (Liu et al., 2019) for the language model backbone and used most of the default/recommended hyperparameters in the Transformers library. We tried two values of the learning rate in our initial experiments: 2e\u22125 and 1e\u22125; these numbers are chosen based on the hyperparameter search described in the library, and we stick to one of them (1e\u22125) based on accuracy. For experiments that involve ensembles, we use an ensemble size 2. For experiments that involve distillation, we set T = 1.0 (i.e., no scaling) for models without automatic temperature selection unless we explicitly mention otherwise. When automatic temperature selection is used, we choseT based on the Brier Scores on the validation dataset. All of our experiments ran on a single V100 GPU. The baseline model has 125.2M parameters, the temperature-scaling (heteroscedastic variant) has 125.8M parameters, and our method has 125.2M (same as the baseline model). We train multiple models using different random seeds before ensembling them, but otherwise run the training and inference once. The runtime varies among tasks, but most of them could finish within a day.", |
|
"cite_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 139, |
|
"text": "15", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Setup Details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Data. We perform experiments on the classification tasks from the GLUE Benchmark (Wang et al., 2019; Warstadt et al., 2019; Dolan and Brockett, 2005; Agirre et al., 2007; Williams et al., 2018; Rajpurkar et al., 2016; Bar-Haim et al., 2006; Giampiccolo et al., 2007; Bentivogli et al., 2009; Levesque et al., 2011) . 1617 , and we refer readers to Wang et al. (2019) regarding dataset statistics. Because the calculation of calibration errors requires access to the ground truth data, which is not available for GLUE data, we split the validation dataset into two halves, one for validation and the other for test, following the approach of Desai and Durrett (2020) . For MultiNLI, we merge the results for both MultiNLI matched and mismatched sections. When computing the out-of-domain performance between the 3-label MultiNLI and other 2-label NLI tasks, we follow the approach used in the jiant library (Pruksachatkun et al., 2020) and merge the predictions/labels that correspond to \"neutral\" and \"contradiction\" into a single category. We follow the Transformers library for the rest of the data preprocessing. 18 and we use the default bin-size of 10 (in the tables below, we additionally include the performances when evaluating using bin-sizes of 15 and 50). For squared calibration errors (debiased or biased), we use the uncertainty-calibration library from Kumar et al. (2019) and follow default configurations whenever possible. 19 For in-domain evaluations, the train data and evaluation data come from the same task. For outof-domain evaluations, the train data and evaluation data come from different tasks of the same type. We group MRPC and QQP (paraphrase tasks), and group MNLI (2-label version), 20 QNLI, RTE, 16 https://gluebenchmark.com/ 17 QQP dataset: https:// www.quora.com/q/quoradata/ First-Quora-Dataset-Release-Question-Pairs", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 100, |
|
"text": "(Wang et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 101, |
|
"end": 123, |
|
"text": "Warstadt et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 149, |
|
"text": "Dolan and Brockett, 2005;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 150, |
|
"end": 170, |
|
"text": "Agirre et al., 2007;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 193, |
|
"text": "Williams et al., 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 194, |
|
"end": 217, |
|
"text": "Rajpurkar et al., 2016;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 240, |
|
"text": "Bar-Haim et al., 2006;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 266, |
|
"text": "Giampiccolo et al., 2007;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 291, |
|
"text": "Bentivogli et al., 2009;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 314, |
|
"text": "Levesque et al., 2011)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 321, |
|
"text": "1617", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 641, |
|
"end": 665, |
|
"text": "Desai and Durrett (2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 906, |
|
"end": 934, |
|
"text": "(Pruksachatkun et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1116, |
|
"end": 1118, |
|
"text": "18", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1441, |
|
"end": 1443, |
|
"text": "19", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1730, |
|
"end": 1732, |
|
"text": "16", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Setup Details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "18 https://github.com/google-research/ google-research/tree/master/uq_ benchmark_2019", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Setup Details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "19 https://github.com/p-lambda/verified_ calibration 20 We only use the 2-label version of MNLI for evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 55, |
|
"text": "20", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Setup Details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "and WNLI (NLI tasks). We leave SST-2 (sentiment), CoLA 21 (acceptability), and MNLI (3-label version, NLI) as separate groups. We use the indomain validation data to train the scaling-binning calibrator.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Setup Details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Analysis Experiments Details. We conduct experiments on RTE, in which we distill teacher models with different ensemble-sizes (from 1 to 6) and the temperature scaling constant (from 0.50 to 2.00 with a step size of 0.02) to student models. Each model is then evaluated on both in-domain task (RTE) and out-of-domain tasks (MNLI-2, QNLI, WNLI) using confidence, ECE, MCE, Brier Scores, SCE (debiased) and SCE (biased). The numbers represent performances on the validation dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Setup Details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Distillation Transferability of Calibration.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C Further Experiment Details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We compute \u03c1 AB based on both Table 1 and Table 2 of the main paper. The percentage of improvement presented in Sec. 6.3 of the main paper on ensembles is computed based on temperature scaling + ensembles (\u2212Distillation in main paper Table 2 ) as A t , ensembles only (Ensemble in main paper Table 1 ) as B t , temperature scaling + ensembles + distillation (Ours in main paper Table 2 ) as A s , and ensembles + distillation (\u2212TempScale in main paper Table 2 ) as B s . The percentage of improvement on temperature scaling is computed similarly with temperature scaling component as the main difference between the teacher/student models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 37, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 42, |
|
"end": 49, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 241, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 299, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 385, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 459, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "C Further Experiment Details", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Please see Fig. 2 for the expanded visualization of the analysis experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 17, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "D Expanded Analysis Experiments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Please see Table 3 and Table 4 for detailed indomain and out-of-domain experiment results.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 18, |
|
"text": "Table 3", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 23, |
|
"end": 30, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "E Detailed Main Experiment Results", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Please see Table 5 and Table 6 for detailed indomain and out-of-domain ablation experiment results. The X-axis refers to the performance of the teacher model, and the Y-axis refers to the performance of the student model. Within each sub-figure, each dot represents a different configuration used in the teacher model. The P/S in the legends refer to the Pearson/Spearman correlations. Table 4 : Out-of-domain performances on MRPC (MR), QQP (QQ), QNLI (QN), RTE (R), WNLI (W). We use M2 to denote the 2-label version of MultiNLI task. Note that for the metrics we considered here, lower scores indicate better calibration. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 18, |
|
"text": "Table 5", |
|
"ref_id": "TABREF10" |
|
}, |
|
{ |
|
"start": 23, |
|
"end": 30, |
|
"text": "Table 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 393, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "F Detailed Ablation Experiment Results", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "One can further include negative log-likelihood score. However, we want to avoid overcrowding the results table with too many numbers (which is already large, please see the supplementary materials Table 3-6). Since both Brier Score and NLL are proper-scoring rules (see Sec.3 in Ovadia et al. (2019)), we believe the results would be qualitatively similar.2 Technically, this is 2-norm Calibration Error. But we refer to this as the Squared Calibration Error for notation simplicity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The top-label variant of scaling-binning calibrator we use outputs calibrated probabilities of the top predictions, whereas Brier Scores require full probability vectors. Thus we exclude Brier Scores when using the scaling-binning calibrator.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that the samples from a model using dropout (MCdropout) or ensemble could be used to approximate the posterior distribution(Gal, 2016; Lakshminarayanan et al., 2017).5 Note that the approximation error equals the KL divergence, the term that the objective function seeks to minimize. As KL-divergence decreases, the approximation error also decreases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "There are many ways to construct a powerful/expensive teacher model, and we choose the popular ensemble method for simplicity. Alternatives includes MC-dropout (with multiple forward passes) and SWA(Izmailov et al., 2018).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For ease of comparison, we only ablate the system without the automatic temperature selection.10 The \u2212Distillation inTable 2is the result of combining ensembles and temperature-scaling. InTable 1, we showed that distillation (especially when combined with automatic temperature) could be helpful compared to either ensembles or temperature-scaling alone.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We chose median as it is simple and less affected by outliers. Please see the supplementary materials Sec. C for more details.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "More generally, there are randomization-based ensembles and boosting-based ensembles. Within the randomizationbased ensembles, in our work we use the entire training dataset to train each model instead of different bootstrap samples of the original training set(Lakshminarayanan et al., 2017).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/huggingface/ transformers. We used v2.4.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use accuracy for CoLA evaluation so that calibration error computations would be more consistent across tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank the reviewers for their helpful comments. This work was supported by DARPA YFA17-D17AP00022, ONR Grant N00014-18-1-2871, and Microsoft PhD Fellowship. The views contained in this article are those of the authors and not of the funding agency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007)", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Llu'is M'arquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wicentowski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Llu'is M'arquez, and Richard Wicen- towski, editors. 2007. Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007). Association for Computational Linguistics, Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Do deep nets really need to be deep?", |
|
"authors": [ |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jimmy Ba and Rich Caruana. 2014. Do deep nets really need to be deep? In NeurIPS.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Optimizing ai for teamwork", |
|
"authors": [ |
|
{ |
|
"first": "Gagan", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Besmira", |
|
"middle": [], |
|
"last": "Nushi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ece", |
|
"middle": [], |
|
"last": "Kamar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Horvitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Weld", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.13102" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gagan Bansal, Besmira Nushi, Ece Kamar, Eric Horvitz, and Daniel S Weld. 2020. Optimizing ai for teamwork. arXiv:2004.13102.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The second pascal recognising textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Bar-Haim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lisa", |
|
"middle": [], |
|
"last": "Ferro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Giampiccolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Idan", |
|
"middle": [], |
|
"last": "Szpektor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "RTE", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roy Bar-Haim, Ido Dagan, Bill Dolan, Lisa Ferro, Danilo Giampiccolo, Bernardo Magnini, and Idan Szpektor. 2006. The second pascal recognising tex- tual entailment challenge. In RTE.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The fifth PASCAL recognizing textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hoa", |
|
"middle": [ |
|
"Trang" |
|
], |
|
"last": "Dang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Giampiccolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "TAC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luisa Bentivogli, Ido Dagan, Hoa Trang Dang, Danilo Giampiccolo, and Bernardo Magnini. 2009. The fifth PASCAL recognizing textual entailment chal- lenge. In TAC.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Combining labeled and unlabeled data with co-training", |
|
"authors": [ |
|
{ |
|
"first": "Avrim", |
|
"middle": [], |
|
"last": "Blum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avrim Blum and Tom Mitchell. 1998. Combining la- beled and unlabeled data with co-training. In COLT.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Verification of forecasts expressed in terms of probability", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Glenn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Brier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1950, |
|
"venue": "Monthly weather review", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Glenn W Brier. 1950. Verification of forecasts ex- pressed in terms of probability. Monthly weather review.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Model compression", |
|
"authors": [ |
|
{ |
|
"first": "Cristian", |
|
"middle": [], |
|
"last": "Bucilu\u01ce", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandru", |
|
"middle": [], |
|
"last": "Niculescu-Mizil", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "KDD", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cristian Bucilu\u01ce, Rich Caruana, and Alexandru Niculescu-Mizil. 2006. Model compression. In KDD.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Bam! born-again multi-task networks for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Urvashi", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Minh-Thang Luong, Urvashi Khandelwal, Christopher D Manning, and Quoc Le. 2019. Bam! born-again multi-task networks for natural language understanding. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The PASCAL recognising textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Ido Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Glickman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Machine learning challenges. evaluating predictive uncertainty, visual object classification, and recognising tectual entailment", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2006. The PASCAL recognising textual entailment challenge. In Machine learning challenges. evalu- ating predictive uncertainty, visual object classifica- tion, and recognising tectual entailment. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Calibration of pre-trained transformers", |
|
"authors": [ |
|
{ |
|
"first": "Shrey", |
|
"middle": [], |
|
"last": "Desai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shrey Desai and Greg Durrett. 2020. Calibration of pre-trained transformers. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Automatically constructing a corpus of sentential paraphrases", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Dolan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "IWP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William B Dolan and Chris Brockett. 2005. Automati- cally constructing a corpus of sentential paraphrases. In IWP.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Knowledge acquisition from examples via multiple models", |
|
"authors": [ |
|
{ |
|
"first": "Pedro", |
|
"middle": [], |
|
"last": "Domingos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pedro Domingos. 1997. Knowledge acquisition from examples via multiple models. In ICML.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "To annotate or not? predicting performance drop under domain shift", |
|
"authors": [ |
|
{ |
|
"first": "Hady", |
|
"middle": [], |
|
"last": "Elsahar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Gall\u00e9", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1222" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hady Elsahar and Matthias Gall\u00e9. 2019. To annotate or not? predicting performance drop under domain shift. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Born again neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Tommaso", |
|
"middle": [], |
|
"last": "Furlanello", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Lipton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Tschannen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Itti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anima", |
|
"middle": [], |
|
"last": "Anandkumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tommaso Furlanello, Zachary Lipton, Michael Tschan- nen, Laurent Itti, and Anima Anandkumar. 2018. Born again neural networks. In ICML.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Uncertainty in Deep Learning", |
|
"authors": [ |
|
{ |
|
"first": "Yarin", |
|
"middle": [], |
|
"last": "Gal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yarin Gal. 2016. Uncertainty in Deep Learning. Ph.D. thesis, University of Cambridge.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "The third PASCAL recognizing textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Giampiccolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernardo", |
|
"middle": [], |
|
"last": "Magnini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "RTE", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danilo Giampiccolo, Bernardo Magnini, Ido Dagan, and Bill Dolan. 2007. The third PASCAL recogniz- ing textual entailment challenge. In RTE.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "On calibration of modern neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Chuan", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoff", |
|
"middle": [], |
|
"last": "Pleiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian Q", |
|
"middle": [], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q Wein- berger. 2017. On calibration of modern neural net- works. In ICML.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Dropout distillation for efficiently estimating model confidence", |
|
"authors": [ |
|
{ |
|
"first": "Corina", |
|
"middle": [], |
|
"last": "Gurau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Bewley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Posner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1809.10562" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Corina Gurau, Alex Bewley, and Ingmar Posner. 2018. Dropout distillation for efficiently estimating model confidence. arXiv:1809.10562.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A baseline for detecting misclassified and out-of-distribution examples in neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Hendrycks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Hendrycks and Kevin Gimpel. 2017. A baseline for detecting misclassified and out-of-distribution examples in neural networks. In ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Pretrained transformers improve out-of-distribution robustness", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Hendrycks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Dziedzic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rishabh", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dawn", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Hendrycks, Xiaoyuan Liu, Eric Wallace, Adam Dziedzic, Rishabh Krishnan, and Dawn Song. 2020. Pretrained transformers improve out-of-distribution robustness. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Distilling the knowledge in a neural network", |
|
"authors": [ |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "NeurIPS Deep Learning and Representation Learning Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geoffrey Hinton, Oriol Vinyals, and Jeffrey Dean. 2015. Distilling the knowledge in a neural net- work. In NeurIPS Deep Learning and Representa- tion Learning Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Averaging weights leads to wider optima and better generalization", |
|
"authors": [ |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Izmailov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitrii", |
|
"middle": [], |
|
"last": "Podoprikhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timur", |
|
"middle": [], |
|
"last": "Garipov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Vetrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Gordon" |
|
], |
|
"last": "Wilson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "UAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov, and Andrew Gordon Wilson. 2018. Averaging weights leads to wider optima and better generalization. In UAI.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "What uncertainties do we need in bayesian deep learning for computer vision?", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Kendall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yarin", |
|
"middle": [], |
|
"last": "Gal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "NeurIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Kendall and Yarin Gal. 2017. What uncertainties do we need in bayesian deep learning for computer vision? In NeurIPS.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Multi-task learning using uncertainty to weigh losses for scene geometry and semantics", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Kendall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yarin", |
|
"middle": [], |
|
"last": "Gal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Cipolla", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Kendall, Yarin Gal, and Roberto Cipolla. 2018. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Sequencelevel knowledge distillation", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander M", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim and Alexander M Rush. 2016. Sequence- level knowledge distillation. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Expected Calibration Error (bin size = 10)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Expected Calibration Error (bin size = 10)", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Expected Calibration Error (bin size = 10)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Expected Calibration Error (bin size = 10)", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Expected Calibration Error (bin size = 15)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Expected Calibration Error (bin size = 15)", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Expected Calibration Error (bin size = 15)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Expected Calibration Error (bin size = 15)", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Expected Calibration Error (bin size = 50)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Expected Calibration Error (bin size = 50)", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Expected Calibration Error (bin size = 50)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Expected Calibration Error (bin size = 50)", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Calibration Error (debiased) Calibration Error (debiased, +SBC)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Calibration Error (debiased) Calibration Error (debiased, +SBC)", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Calibration Error (biased) Calibration Error (biased, +SBC)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Calibration Error (biased) Calibration Error (biased, +SBC)", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "We use M2 to denote the 2-label version of MultiNLI task. Note that for the metrics we considered here", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Out-of-domain ablation performances on MRPC (MR), QQP (QQ), QNLI (QN)", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Table 6: Out-of-domain ablation performances on MRPC (MR), QQP (QQ), QNLI (QN), RTE (R), WNLI (W). We use M2 to denote the 2-label version of MultiNLI task. Note that for the metrics we considered here, lower scores indicate better calibration.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Left-most", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Evaluation Details. Our evaluation follows Guo et al. (2017), Ovadia et al. (2019), and Kumar et al. (2019). For MCE, ECE, and Brier Score, our implementation follows Ovadia et al. (2019),", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"text": "Figure (a): Visualization of calibration performance, measured by SCEs (debiased and biased), between teacher models and student models, trained on RTE evaluated on RTE (in-domain), WNLI, QNLI, and 2-label version of MNLI (out-of-domain). The n in the legend refers to the size of ensemble(s).Figure (b): This is a zoomed-out version ofFigure (a). Instead of using color to imply the ensemble size, here the color refers to the task in which the models are evaluated, and points of different ensemble sizes but the same evaluation task are aggregated and represented by the same color. Here each sub-figure represents the evaluation metric. AllFigures:", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "In-domain and out-of-domain experiment results averaged across tasks. SCE(d)/SCE(b): Squared Calibration Errors (debiased/biased). Lower scores indicate better calibration. Bold/underscored numbers are the best/second-best among comparisons, respectively.", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"content": "<table><tr><td>Thomas Wolf, Lysandre Debut, Victor Sanh, Julien</td></tr><tr><td>Chaumond, Clement Delangue, Anthony Moi, Pier-</td></tr><tr><td>ric Cistac, Li Yuan, Francis EH Tay, Guilin Li, Tao Wang, and</td></tr><tr><td>Jiashi Feng. 2020. Revisit knowledge distillation: a</td></tr><tr><td>teacher-free framework. In CVPR.</td></tr><tr><td>Sukmin Yun, Jongjin Park, Kimin Lee, and Jinwoo</td></tr><tr><td>Shin. 2020. Regularizing class-wise predictions via</td></tr><tr><td>self-knowledge distillation. In CVPR.</td></tr><tr><td>Bianca Zadrozny and Charles Elkan. 2001. Obtaining</td></tr><tr><td>calibrated probability estimates from decision trees</td></tr><tr><td>and naive bayesian classifiers. In ICML.</td></tr><tr><td>Xinchuan Zeng and Tony R. Martinez. 2000. Using a</td></tr><tr><td>neural network to approximate an ensemble of clas-</td></tr><tr><td>sifiers. Neural Processing Letters.</td></tr></table>", |
|
"text": "Tim Rault, R'emi Louf, Morgan Funtowicz, and Jamie Brew. 2019. Huggingface's transformers: State-of-the-art natural language processing. ArXiv, abs/1910.03771.", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"html": null, |
|
"content": "<table><tr><td>: In-domain performances on SST-2 (S), CoLA (C), MultiNLI (MN), MRPC (MR), QQP (QQ), QNLI (QN), RTE (R), WNLI (W). Note that for the metrics we considered</td><td>here, lower scores indicate better calibration.</td></tr></table>", |
|
"text": "", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF10": { |
|
"html": null, |
|
"content": "<table><tr><td>Average</td><td/><td/><td/></tr><tr><td/><td>R</td><td/><td/></tr><tr><td/><td>QN</td><td/><td/></tr><tr><td>W</td><td>M2</td><td/><td/></tr><tr><td/><td>W</td><td/><td/></tr><tr><td/><td>QN</td><td/><td/></tr><tr><td>R</td><td>M2</td><td/><td/></tr><tr><td/><td>W</td><td/><td/></tr><tr><td/><td>R</td><td/><td/></tr><tr><td>MR QQ QN</td><td>QQ MR M2</td><td>Accuracy (+SBC)</td><td/></tr><tr><td>Average</td><td/><td/><td/></tr><tr><td/><td>R</td><td/><td/></tr><tr><td/><td>QN</td><td/><td/></tr><tr><td>W</td><td>M2</td><td/><td/></tr><tr><td/><td>W</td><td/><td/></tr><tr><td/><td>QN</td><td/><td>61.0</td></tr><tr><td>R</td><td>M2</td><td/><td>67.3</td></tr><tr><td/><td>W</td><td/><td>47.2</td></tr><tr><td/><td>R</td><td/><td>57.6</td></tr><tr><td>QQ QN</td><td>MR M2</td><td/><td>68.1 58.6</td></tr><tr><td>MR</td><td>QQ</td><td>Accuracy</td><td>68.4</td></tr><tr><td>Train</td><td>Eval</td><td/><td>Ours</td></tr></table>", |
|
"text": "In-domain ablation performances on SST-2 (S), CoLA (C), MultiNLI (MN), MRPC (MR), QQP (QQ), QNLI (QN), RTE (R), WNLI (W). Note that for the metrics we considered here, lower scores indicate better calibration.", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |