|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:29:04.370327Z" |
|
}, |
|
"title": "A Case Study of Efficacy and Challenges in Practical Human-in-Loop Evaluation of NLP Systems using Checklist", |
|
"authors": [ |
|
{ |
|
"first": "Shaily", |
|
"middle": [], |
|
"last": "Bhatt", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Research", |
|
"location": { |
|
"settlement": "Bangalore", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft R&D", |
|
"location": { |
|
"settlement": "Hyderabad", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sandipan", |
|
"middle": [], |
|
"last": "Dandapat", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft R&D", |
|
"location": { |
|
"settlement": "Hyderabad", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sunayana", |
|
"middle": [], |
|
"last": "Sitaram", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Research", |
|
"location": { |
|
"settlement": "Bangalore", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Despite state-of-the-art performance, NLP systems can be fragile in real-world situations. This is often due to insufficient understanding of the capabilities and limitations of models and the heavy reliance on standard evaluation benchmarks. Research into non-standard evaluation to mitigate this brittleness is gaining increasing attention. Notably, the behavioral testing principle 'Checklist', which decouples testing from implementation revealed significant failures in state-of-the-art models for multiple tasks. In this paper, we present a case study of using Checklist in a practical scenario. We conduct experiments for evaluating an offensive content detection system and use a data augmentation technique for improving the model using insights from Checklist. We lay out the challenges and open questions based on our observations of using Checklist for humanin-loop evaluation and improvement of NLP systems. Disclaimer: The paper contains examples of content with offensive language. The examples do not represent the views of the authors or their employers towards any person(s), group(s), practice(s), or entity/entities.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Despite state-of-the-art performance, NLP systems can be fragile in real-world situations. This is often due to insufficient understanding of the capabilities and limitations of models and the heavy reliance on standard evaluation benchmarks. Research into non-standard evaluation to mitigate this brittleness is gaining increasing attention. Notably, the behavioral testing principle 'Checklist', which decouples testing from implementation revealed significant failures in state-of-the-art models for multiple tasks. In this paper, we present a case study of using Checklist in a practical scenario. We conduct experiments for evaluating an offensive content detection system and use a data augmentation technique for improving the model using insights from Checklist. We lay out the challenges and open questions based on our observations of using Checklist for humanin-loop evaluation and improvement of NLP systems. Disclaimer: The paper contains examples of content with offensive language. The examples do not represent the views of the authors or their employers towards any person(s), group(s), practice(s), or entity/entities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "NLP systems have been known to learn spurious patterns from data to achieve high accuracy on test sets (Goyal et al., 2017; Gururangan et al., 2018; Glockner et al., 2018; Tsuchiya, 2018; Geva et al., 2019) . Evaluating models on static benchmarks and on test sets that have a similar distribution to the training data has resulted in an overestimation of model performance (Belinkov and Bisk, 2018; Recht et al., 2019) and models becoming increasingly fragile or less useful in real-world settings. This can be due to various factors such as language complexity and variability, the difference between training, testing, and real-world data, and insufficient understanding of the capabilities and limitations of the model itself. When deployed in the wild, such systems tend to break down, resulting in grossly incorrect predictions. This leads to mistrust in the system at two levels -first, on individual predictions and second, on the system's soundness in uncontrolled environments such as usage after deployment (Ribeiro et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 123, |
|
"text": "(Goyal et al., 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 148, |
|
"text": "Gururangan et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 171, |
|
"text": "Glockner et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 187, |
|
"text": "Tsuchiya, 2018;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 206, |
|
"text": "Geva et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 399, |
|
"text": "(Belinkov and Bisk, 2018;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 419, |
|
"text": "Recht et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1018, |
|
"end": 1040, |
|
"text": "(Ribeiro et al., 2016)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Further, evaluation benchmarks are also becoming increasingly obsolete due to the exponential rise in data and compute-heavy systems that exceed performance expectations, bringing the benchmark's 'toughness' and hence, its reliability into question (Nie et al., 2020) . In order to mitigate this limitation of static evaluation, several approaches are used to evaluate other model aspects including, but not limited to, robustness (Rychalska et al., 2019) , fairness (Prabhakaran et al., 2019) , consistency (Ribeiro et al., 2019) , explanations (Ribeiro et al., 2016) , and adversarial performance (Ribeiro et al., 2018b; Iyyer et al., 2018; Nie et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 267, |
|
"text": "(Nie et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 455, |
|
"text": "(Rychalska et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 493, |
|
"text": "(Prabhakaran et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 508, |
|
"end": 530, |
|
"text": "(Ribeiro et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 546, |
|
"end": 568, |
|
"text": "(Ribeiro et al., 2016)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 599, |
|
"end": 622, |
|
"text": "(Ribeiro et al., 2018b;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 623, |
|
"end": 642, |
|
"text": "Iyyer et al., 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 643, |
|
"end": 660, |
|
"text": "Nie et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Human-in-Loop processes can be used to complement the capabilities of automation with human expertise (Ribeiro et al., 2020; Potts et al., 2020; Nie et al., 2020; Ribeiro et al., 2018b) . Previous studies have shown that using humans to close the loop of the process of evaluation, explanation, or improvement can lead to a much better understanding of the system through higher explainability (Ribeiro et al., 2018a (Ribeiro et al., , 2016 , better detection of model failures (Ribeiro et al., 2020; Iyyer et al., 2018) , and easier bug-fixing (Ribeiro et al., 2020 (Ribeiro et al., , 2018b , resulting in robustness of the model in practical scenarios and increased trust in its predictions. Ribeiro et al. (2020) introduced a behavioral testing strategy that decouples testing from model implementation. Using human-generated test sets, they showed that state-of-the-art NLP models for multiple tasks fail to perform well for basic capabilities. We describe the framework in detail in section 2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 124, |
|
"text": "(Ribeiro et al., 2020;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 144, |
|
"text": "Potts et al., 2020;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 162, |
|
"text": "Nie et al., 2020;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 185, |
|
"text": "Ribeiro et al., 2018b)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 416, |
|
"text": "(Ribeiro et al., 2018a", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 417, |
|
"end": 440, |
|
"text": "(Ribeiro et al., , 2016", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 500, |
|
"text": "(Ribeiro et al., 2020;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 501, |
|
"end": 520, |
|
"text": "Iyyer et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 566, |
|
"text": "(Ribeiro et al., 2020", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 591, |
|
"text": "(Ribeiro et al., , 2018b", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 715, |
|
"text": "Ribeiro et al. (2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we describe a case study of using the Checklist paradigm of evaluation in a practi-cal scenario. Specifically, we used Checklist to evaluate and debug an offensive content detection system. We found that Checklist can lead to effective pinpointing of specific capabilities for which the model, despite impressive performance on a standard benchmark test set, failed. Further, these insights can be used to improve the model using targeted data augmentation to debug specific model failures. However, we found that using Checklist for evaluation and improvement is not always foolproof. We discuss the challenges and open questions observed during these experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows: First, we give a brief overview of the Checklist framework. In Section 3, we describe our case study, including the capabilities we test, results on the base model, the method applied to use these insights for improvement, and the results thereafter. Finally, we present a detailed analysis of some of the most imminent challenges with using Checklist for our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The Checklist framework (Ribeiro et al., 2020) introduces a human-in-loop behavioral testing technique for evaluating NLP systems. The authors argue that even though models perform well on static benchmarks, they fail to perform in realworld scenarios for basic capabilities. They release an open-source package 1 with functionality to create template sets and run software engineering-like decoupled testing on black-box models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of Checklist", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The individual phenomena tested using Checklist are known as capabilities. These capabilities are based on model expectations and the language usage that it needs to handle. For example, Negation is a capability of a Sentiment Analysis model -the model should be able to distinguish 'happy' and 'not happy' as two opposite sentiments despite the overlapping word 'happy'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of Checklist", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The Checklist framework provides three different test types. The Minimum Functionality Tests (MFTs) are simple tests, similar to unit tests in software testing, that can test predictions on specific model capabilities. Most of the capabilities tested in our case study are MFTs. Invariance tests (INVs) are a test type where small semantic-preserving perturbations are applied to the test cases, and it is expected that the model output should not change. For example -in our case, while testing for the Ro-1 https://github.com/marcotcr/checklist bustness of the model, small typos are introduced. Directional Expectation tests (DIRs) are similar to INV, except that the model output is expected to change in a certain way.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of Checklist", |
|
"sec_num": "2" |
|
}, |
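To make the three test types concrete, the sketch below shows assumed usage of the open-source checklist package: an MFT over hand-labeled sentences and an INV robustness test built from typo perturbations. The sentiment-style sentences and the constant-probability classifier are placeholders, not the paper's offensive content model or data.

```python
# Minimal sketch of Checklist test types (assumed usage of the open-source
# `checklist` package); the classifier below is a stand-in, not the paper's model.
import numpy as np
from checklist.test_types import MFT, INV
from checklist.perturb import Perturb
from checklist.pred_wrapper import PredictorWrapper

# Minimum Functionality Test (MFT): hand-written sentences with expected labels.
sentences = ["I am happy today.", "I am not happy today."]
labels = [0, 1]  # hypothetical binary labels for the placeholder task
mft = MFT(sentences, labels=labels, name="Negation MFT", capability="Negation")

# Invariance test (INV): label-preserving typo perturbations should not change
# the model's prediction.
inv = INV(**Perturb.perturb(sentences, Perturb.add_typos),
          name="Typo robustness", capability="Robustness")

# Placeholder classifier returning class probabilities; replace with the real model.
def predict_proba(texts):
    return np.tile([0.5, 0.5], (len(texts), 1))

wrapped = PredictorWrapper.wrap_softmax(predict_proba)
for test in (mft, inv):
    test.run(wrapped)
    test.summary()  # prints failure rate and example failures
```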
|
{ |
|
"text": "In order for humans to generate test cases, Checklist uses Templates and Lexicons. For example: 'I {POSITIVE VERB} {ACTIVITY}.' is a template. 'POSITIVE VERB' and 'ACTIVITY' in this template are two different keywords in the lexicon, each taking a specific set of values. For example, POSITIVE VERB = ['like', 'love', 'enjoy'] and ACTIVITY = ['dancing', 'hiking' 'cooking', 'coding'] . The template generates 12 examples -the Cartesian product of the values of the two lexicon keywords in the template.", |
|
"cite_spans": [ |
|
{ |
|
"start": 301, |
|
"end": 326, |
|
"text": "['like', 'love', 'enjoy']", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 383, |
|
"text": "['dancing', 'hiking' 'cooking', 'coding']", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of Checklist", |
|
"sec_num": "2" |
|
}, |
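As an illustration of this template expansion, the following sketch uses the checklist package's Editor to generate the 12 examples from the template above. The lexicon keys are renamed to valid Python identifiers; this is assumed usage of the library, not the authors' exact code.

```python
# Sketch of template expansion with checklist's Editor (assumed usage);
# lexicon keys renamed to valid identifiers (pos_verb, activity).
from checklist.editor import Editor

editor = Editor()
ret = editor.template(
    "I {pos_verb} {activity}.",
    pos_verb=["like", "love", "enjoy"],
    activity=["dancing", "hiking", "cooking", "coding"],
)
print(len(ret.data))  # 12 = 3 verbs x 4 activities (Cartesian product)
print(ret.data[:3])   # e.g. sentences such as 'I like dancing.'
```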
|
{ |
|
"text": "In the original Checklist work, the authors test state-of-the-art and commercial systems across three tasks revealing unprecedented failure rates even for the most basic capabilities. For more detailed information on these results, we refer the reader to the original paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of Checklist", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We used Checklist to evaluate and improve an inhouse offensive content detection system. While we studies a particular system as a use case, the process can be applied to any NLP model. The first step was to create a documentation of expectations. This documentation was important to lay out guidelines for what constitutes positive or negative examples in specific settings. In accordance with standard text classification annotation, for this task, offensive content is the positive class, and non-offensive content is the negative class. The documentation or guidelines contained concrete definitions of various offensive content categories that the model is expected to detect. These are henceforth known as the model capabilities and are described in section 3.1. This is important for Checklist evaluation because it relies on building templates that target specific model capabilities. We gave this documentation to the annotators who were expected to use these guidelines to come up with templates of specific capabilities that could be used to find bugs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Case Study of Using Checklist", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Since offensive classification is a task that inherently has a class imbalance with fewer examples in the positive class in most publicly available datasets (de Gibert et al., 2018; Davidson et al., 2017) , including ours, we asked the annotators to develop templates that would result in offensive data. An-other reason for this design decision was that while offensive content is more likely to follow a certain pattern -a combination of entities and offensive language, non-offensive content would not follow any discernible patterns making it difficult to convert it into templates. Further, from an application perspective in real-world settings, where such a model is employed to flag or filter offensive content, a higher recall is preferable, as false negatives (examples that are offensive but predicted as nonoffensive) need to be avoided even at the cost of a few false positives.", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 181, |
|
"text": "(de Gibert et al., 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 204, |
|
"text": "Davidson et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Case Study of Using Checklist", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Based on the documentation the capabilities tested for detection of offensive content were:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Capabilities", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "1. Characterization: Derogatory content/Defamation targeted towards individuals or groups (people who share certain attributes like sex, race, religion, nationality, occupation, age, etc.) including stereotypes. Also includes positive and negative characterizations of individuals or groups including comments on sexual orientation, ethics, morality, habits, physical appearance, or other characteristics. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Capabilities", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We used one template set and documented the failure rates of an in-house XLMR-based offensive content detection model, henceforth referred to as the base model. This base model had a good performance on the standard static benchmark test sets of 10k instances, 2 similar to other state-of-the-art systems for offensive content detection. The static test set had a similar distribution to the training data and has roughly 2.5k positive and 7.5k negative examples. The metrics on the static test set are available in the base model row of Table 1 . The model was a 24-layer transformer-based XLM-R model finetuned with 481k examples of offensive and hate speech data, out of which 198k were positive examples. The model was trained with a learning rate of 5e-6 for 10 epochs with a batch size of 128.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 538, |
|
"end": 545, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Using Checklist to Evaluate the Base Model", |
|
"sec_num": "3.2" |
|
}, |
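The paper does not state which training framework was used. Purely as an illustration of the reported configuration (a 24-layer XLM-R model, learning rate 5e-6, 10 epochs, batch size 128), the sketch below plugs those values into a HuggingFace-style fine-tuning setup; the checkpoint name and the tiny dataset are placeholder assumptions.

```python
# Illustrative sketch only: the reported hyperparameters in a HuggingFace-style
# setup. The framework, checkpoint, and dataset are assumptions, not the paper's code.
import torch
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

model_name = "xlm-roberta-large"  # assumed checkpoint; its encoder has 24 layers
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

class ToyDataset(torch.utils.data.Dataset):
    """Placeholder for the tokenized 481k-example offensive/hate-speech training set."""
    def __init__(self, texts, labels):
        self.enc = tokenizer(texts, truncation=True, padding=True)
        self.labels = labels
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, i):
        item = {k: torch.tensor(v[i]) for k, v in self.enc.items()}
        item["labels"] = torch.tensor(self.labels[i])
        return item

train_dataset = ToyDataset(["a non-offensive example", "an offensive example"], [0, 1])

args = TrainingArguments(
    output_dir="offensive-base-model",
    learning_rate=5e-6,               # reported learning rate
    num_train_epochs=10,              # reported number of epochs
    per_device_train_batch_size=128,  # reported batch size
)
Trainer(model=model, args=args, train_dataset=train_dataset).train()
```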
|
{ |
|
"text": "Targeting specific capabilities using Checklist showed huge failure rates, indicating that the model still failed to meet the expectations even with good performance metrics. This is consistent with the original findings of Ribeiro et al. 2020, where multiple state-of-the-art models were found to have huge failure rates for the many basic capabilities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Checklist to Evaluate the Base Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Particularly in derogatory content, offensive content against a specific person seemed to be tougher to detect as compared to offensive content against a group. This may be because the names used to generate the test cases for offensive content against a person may not necessarily be names of famous people seen in training data, and the model was unable to generalize offensive language detection to unseen named entities. It is also possible that offensive content against specific groups is a more sensitive issue and is thus represented more in the base model's training data. Further, the model was unable to handle negation very well. This is consistent with the findings of Ribeiro et al. 2020who also found that state-of-the-art sentiment analysis models failed much more when dealing with negation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Checklist to Evaluate the Base Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In capabilities of unsafe and violent content, the failure rates were comparatively lower. This can be attributed to the fact that such content is more likely to contain specific keywords or patterns that the model has learned to classify as offensive during training, resulting in lower failure rates even when tested using templates. In Racy content, however, this might have been tougher. This is because racy content is often observed to be multi-intentioned. The same content can be an innocent statement or a racy statement. For example, words like 'cock' or 'chicks' that are often used in an explicit or racy sense and can also refer to their actual (non-racy) meaning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Checklist to Evaluate the Base Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Finally, Checklist evaluation revealed that the model was NOT robust to minor perturbations. This is an important finding because it is expected that the model would come across content that the user intentionally or unintentionally mistypes. However, such perturbations may not have been reflected in the training and standardized testing data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Checklist to Evaluate the Base Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "As seen in the Base Model row of Table 2 , the failure rates for examples generated by templates of specific capabilities is high.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 40, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Using Checklist to Evaluate the Base Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "So far, consistent with previous results, Checklist evaluation revealed important gaps in the base model. The interesting follow-up question is how to use these insights to improve the model to overcome current limitations. Typically, in deep learning models, model improvements result from improved model architectures, better training or finetuning strategies, or more data. However, these strategies do not directly address the limitation of models in specific capabilities in the way that Checklist reveals. Thus, we explored the use of insights from the human-in-loop evaluation that can help improve the model in these specific settings while also testing the improved model against standard benchmarks used to evaluate the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Improving the Model", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We used an iterative process of data augmentation to improve the model using the insights of model failures from Checklist evaluation and data generated by templates. We chose this iterative data augmentation method due to its demonstrated success in improving NLI models by Nie et al. (2020) , who proposed iterative Human-And-Modelin-the-Loop-Enabled-Training (HAMLET) to create dynamic and harder adversarial test sets for that 'fools' 3 the model. These harder examples are then used to re-train the model, and the process is repeated. Potts et al. (2020) also successfully use a similar human-in-loop feedback process with data augmentation to create iterations of datasets and better models for sentiment analysis. Our process in spirit is similar, except instead of adversarial examples, we focused on specific capabilities from the Checklist evaluation using templates. A set of examples generated from the same Checklist templates, which is disjoint from the test examples themselves, were appended to the model's original training set, and the model was re-trained. This yielded a new model, henceforth called the augmented model. The augmented model was then tested on the set of examples that was earlier used to test the base model. Specifically, TS-1 was the set of templates used to test the base model. This template set was generated by a human annotator, known to have sufficient expertise of English. The data generated from the TS-1 was divided into a training subset (TrS) and test subset (TeS) with a ratio of 60:40. First, the base model's failure rates on TeS were recorded as shown in Table 2 . Now the TrS was combined with the base model's training data, and the model was re-trained. This re-trained model is called the augmented model. The data from TeS was now used to test this augmented model for the capabilities 3 flips the output of the model captured in TS-1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 275, |
|
"end": 292, |
|
"text": "Nie et al. (2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 540, |
|
"end": 559, |
|
"text": "Potts et al. (2020)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1610, |
|
"end": 1617, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Augmentation Methodology", |
|
"sec_num": "3.3.1" |
|
}, |
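A schematic sketch of this split-and-augment step follows; the toy DataFrames, column names, and counts are illustrative assumptions, since the actual generated data and training set are not public.

```python
# Schematic sketch of the augmentation step described above; the tiny DataFrames
# stand in for the (non-public) template-generated data and original training set.
import pandas as pd
from sklearn.model_selection import train_test_split

# Stand-in for the examples generated from template set TS-1 (the templates were
# designed to produce offensive, i.e. positive-class, examples).
ts1 = pd.DataFrame({
    "text": [f"generated example {i}" for i in range(10)],
    "label": [1] * 10,
})

# 60:40 split into an augmentation subset (TrS) and a held-out Checklist test subset (TeS).
trs, tes = train_test_split(ts1, test_size=0.4, random_state=0)

# Stand-in for the base model's original training data (481k examples in the case study).
original_train = pd.DataFrame({"text": ["original example"], "label": [0]})

# TrS is appended to the original training data; re-training on this combined set
# yields the augmented model, which is then re-evaluated on TeS and the static benchmark.
augmented_train = pd.concat([original_train, trs], ignore_index=True)
print(len(trs), len(tes), len(augmented_train))  # 6 4 7
```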
|
{ |
|
"text": "We found that the failure rates of the augmented model dropped significantly. Interestingly, the performance on the static evaluation test sets neither improved nor degraded substantially, which can be seen in Table 1 . Here, Aug-1 was the model obtained by retraining the base model with the original data plus data from TrS of TS-1. The rest of the four augmented models, Aug 2-5, will be described subsequently. This shows that while data augmentation helps specific capabilities, it does not degrade performance on the static benchmark leading to the conclusion that the retrained model is not over-fitted to the examples generated using Checklist.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 217, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance After Data Augmentation", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "The fact that the performance on the benchmark test set did not improve showed that static benchmark evaluation sets failed to evaluate the model rigorously enough for important capabilities. Adding data points that make the model more robust to such examples improves the model overall. However, this improvement was not captured in the static evaluation as the test set might not have contained such specific examples for these capabilities in the first place. This is why the failure of model in these scenarios went unnoticed till it was evaluated specifically for those capabilities using Checklist. This observation bolsters the case of using Checklist evaluation for better understanding and explainability of the limitations of the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Performance After Data Augmentation", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "The fact that TrS and TeS have very similar (but not the same) examples as they were generated from the same set of templates can be one reason for the augmented model's extremely low failure rate. However, to analyze whether the model learned generalizable capabilities from the TS-1 and ascertain that these gains in performance corresponding to lower failure rates are not specific to a template set, we asked a new independent annotator to use the documentation guidelines to create templates from scratch. The data generated from this independently generated template set is used to evaluate the base and augmented model (Aug-1, which was trained on data from TS-1). This process was carried out with four different annotators, resulting in 4 new augmented models (Aug 2-5).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing on Multiple Template Sets", |
|
"sec_num": "3.3.3" |
|
}, |
|
{ |
|
"text": "Specifically, we created a larger study and asked four more annotators to create template sets independently using the documentation guidelines. These template sets are called TS-2, TS-3, TS-4, and TS-5. Data from each template set was also split into training and testing sets with a ratio of 60:40. The same base model was first used, and failure rates were recorded on each template set. Next, four more augmented models were created (Aug-2, Aug-3, Aug-4, and Aug-5). For creating Aug-i (2\u2264i\u22645), the training data from TS-i was combined with the base model's training data, and the model was re-trained on this entire dataset. Now the failure rates of Aug-i were recorded on the held out test set (data points coming from the same templates but disjoint from the training augmented data) of TS-i and the entire data from the rest of the template sets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing on Multiple Template Sets", |
|
"sec_num": "3.3.3" |
|
}, |
|
{ |
|
"text": "All template sets were generated by annotators with expertise of English language and were crosschecked for correctness. The number of templates in each template sets ranged from 18-25 distributed among the different capabilities. The number of examples generated from template sets that were added to retraining of the model (including perturbed examples for robustness test) were close to 50k for each of the augmented models. There were no templates that were exactly the same in any pairs of template sets, though, there were some templates that were similar. The overlap in terms of examples generated was less than 0.02% between any of the sets. The lexicon keywords had some common vocabulary. However, this can be expected due to the specificity of the task and the words that are commonly used in such offensive statements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing on Multiple Template Sets", |
|
"sec_num": "3.3.3" |
|
}, |
|
{ |
|
"text": "We report the average failure rates in Table 3 . The reported average is the weighted average of failure rates across different capabilities, weighted according to the number of examples the template set has for that capability. The results across template sets vary, and we discuss the challenge of ascertaining template quality in detail in section 4.3.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 46, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance on Multiple Template Sets", |
|
"sec_num": "3.3.4" |
|
}, |
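The weighted average described above can be computed as in the sketch below; the capability names are taken from the case study, but the example counts and failure rates are invented for illustration.

```python
# Sketch of the weighted-average failure rate: per-capability failure rates
# weighted by the number of generated examples per capability.
# The counts and rates below are made up for illustration only.
def weighted_failure_rate(per_capability):
    """per_capability maps capability -> (n_examples, failure_rate in [0, 1])."""
    total = sum(n for n, _ in per_capability.values())
    return sum(n * rate for n, rate in per_capability.values()) / total

example = {
    "Characterization": (4000, 0.42),
    "Negation": (1500, 0.55),
    "Robustness": (2500, 0.30),
}
print(f"{weighted_failure_rate(example):.1%}")  # 40.7%
```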
|
{ |
|
"text": "We found that for all our augmented models, the failure rates between the base and augmented model significantly differed for the test holdout of its own template set. Further, the augmented models showed better performance than the base model across all examples from all other template sets that were generated independently by different annotators. In fact, we saw improvements up to 15-20% in multiple cases (e.g. Aug-1 on Ts-3 and Ts-5) . This indicates that the model did learn some generalizable capabilities irrespective of the template set used for augmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Performance on Multiple Template Sets", |
|
"sec_num": "3.3.4" |
|
}, |
|
{ |
|
"text": "Grouping the results by capability, we found a general trend of lower failure rates in augmented models. There were no clear trends of a particular capability consistently benefitting more or less. The failure rates of Template Sets 2-5 on the Augmented models 2-5 and base model grouped by capabilities are in the Appendix.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Performance on Multiple Template Sets", |
|
"sec_num": "3.3.4" |
|
}, |
|
{ |
|
"text": "Our case study was an experiment of using Checklist to debug NLP systems. It presents optimistic findings for using human-in-loop for improving model performance. However, using this technique for evaluation and improvement is not straightforward or foolproof. In this section, we discuss some nuances and challenges that we observed while conducting these experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Open Questions", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The process, while effective, is intensive in both human and computational resources.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Resource Requirement", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Generating templates from scratch required a significant amount of annotator hours. In our experiments, it took 1 hour to create 5-7 templates spanning 1-2 capabilities. This time can vary from person to person. A single annotator required a minimum of half of a workday 4 and a maximum of 2 workdays to come up with template sets. The time may also vary based on the task for which templates are being generated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Resource Requirement", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Once the template sets are created, generating Checklist reports for evaluation is computationally cheap, the cost of model inference notwithstanding. However, using the insights of this evaluation to carry out the targeted data augmentation procedure can be compute-heavy. Retraining the model can cost significant time, money, and energy. Fine-tuning, though computationally cheaper can lead to over-fitting on template sets, which is why we chose not to take the approach. Further, going through the iterative and parallel versions of the process would require further investment of human and computational resources to generate more template sets, employ more annotators, and repeated retraining of the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Resource Requirement", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In the current version of the process, we used a simple but effective iterative data augmentation procedure. While this is effective in our case it can lead to over-fitting or catastrophic forgetting in deep learning models. Furthermore, as stated earlier, the process itself is compute-expensive.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods to Improve the Model", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Data augmentation may not be the only (or the most optimal) solution. Some other methods that can be utilized are continual training or fine-tuning. Furthermore, there can be more than one way to combine the initial training and template generated datasets to yield better performance. Thus, the effective use of the insights from Checklist evaluation still remains an open question for future studies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods to Improve the Model", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "An important question for any evaluation technique, whether static benchmark or human-generated templates, is its quality. In both cases, it is difficult to quantify quality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Template and Template Set Quality", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The main reason why it is important to estimate the quality of template sets is evident from the results of Table 3 . None of the augmented models are better for all the template sets across the board, and performance on the same template set can vary significantly for different augmented models created by augmenting different data points. Thus, the templates that humans come up with and the examples that those templates generate can significantly impact how much the model improves.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 115, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Template and Template Set Quality", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Quality can be viewed in two ways, absolute quality, and relative quality. Absolute quality of a template refers to easily quantifiable measures such as the number of examples it generates and the capabilities it covers. On the other hand, two templates are compared for their quality in the case of relative quality. In this case, the higher quality template would intuitively be one that can find more bugs or result in higher failure rates in the model. It is important to note that higher absolute quality may not always result in higher relative quality. A template can generate more examples and cover more capabilities and give low failure rates, leading to finding fewer bugs than another template that generates fewer examples or spans fewer capabilities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Template and Template Set Quality", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Relative template quality is a more effective way for quality analysis of templates because it is driven by failure rates of the model on the template compared to other templates, and this is the basis of finding bugs using Checklist. However, whether a template is 'tougher' (hence, of higher quality with respect to relative quality evaluation) or 'easier' is subjective to the model and its training data. In other words, a template that results in higher failure rates for a particular model as compared to another template of the same capability can show lower failure rates when used with another model and vice versa. Furthermore, human analysis of template quality may not always sync with the model performance. That is, a template that a human may deem to be 'tougher' for a model may not be so.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Template and Template Set Quality", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Following the definition of template quality for individual templates may not always extrapolate to a template set's quality. That is, while comparing the quality of two template sets, it can be possible (and in fact, often observed in our study) that a template set may contain some templates that are of a higher relative quality and some templates that are of lower relative quality as compared to the templates of another template set. This makes it even more difficult to quantify even the relative quality of template sets that span multiple templates and capabilities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Template and Template Set Quality", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Further, template generation by humans is an endless process; one can keep on generating more and more templates given time. In fact, in the extreme situation, it is possible that the an iteration of Checklist evaluation may not reveal any actionable bugs, in such a scenario, it would be unclear as to how many iterations would be needed in order to claim that the model does not have any bugs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Template and Template Set Quality", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Moreover, within template sets, multiple capabilities are covered by putting together different templates by annotators. Our results show that this does not lead to consistent improvements across capabilities. Thus, obtaining the best combination of different templates is not straight-forward. A detailed study into what constitutes better quality templates can help ascertain a more effective selection process from a large set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Template and Template Set Quality", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Finally, multiple templates can be combined to form template sets, and how to put together template sets that uniformly benefit all capabilities is unclear. Thus, techniques to find the optimal and representative template sets generated with little human effort and can be relied upon for holistic evaluation are an imminent challenge and makes quality estimation of templates and template set an important open question.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Template and Template Set Quality", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Since annotators are an indispensable part of this study, it was important to understand their perspective. We thus interviewed the annotators in order to gain insight into their experience.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experience of Annotators", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "For our study, the annotators can be considered as 'experts' 5 . The common feedback we received was that it was difficult to come up with the template sets from scratch. On further probing, this difficulty could be broken down into multiple steps.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experience of Annotators", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "First, generating offensive content templates needs specific vocabulary, also known as lexicons in Checklist. Creating these lexicons from scratch can be subject to creativity and offensive language usage. Further, using these lexicons to generate templates is again subject to creativity, which varies from person to person, and can be difficult to replicate from a scientific perspective. This difficulty can be ported to almost any task for which templates are to be generated as it would need the creation of specific lexicon vocabulary and their combinations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experience of Annotators", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Secondly, it is not easy to ascertain what set of templates is best. As discussed in the template quality section, while the quality of individual templates can be judged by failure rates, for an annotator developing templates in a limited time-frame, the template set generated may not always be optimal, or the best possible set that finds the maximum bugs. Thus, without instant model feedback, deciding which templates are good and which are not is difficult, and finding the most optimal template set may not be feasible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experience of Annotators", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Finally, from the perspective of this particular task of the case-study, offensive content itself is a topic open to interpretation from perspectives of communities coming from varied socio-cultural backgrounds and individual sentiments, philosophy, and beliefs. What one person may find offensive, another person may not, and vice versa. As a result, despite well-documented qualitative guidelines of expectations from the models, individual examples can have debatable annotation. This ambiguity is also carried into the template generation process, where an annotator's individuality may reflect in the offensive templates that they generate. Typically, it is easier for humans to verify annotations or explanations rather than generating them from scratch. This can be extended to judging whether a template is correct and useful. Thus developing techniques that can be utilized for automated template creation from small seed data followed by verification and labeling by humans can be an important future research direction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experience of Annotators", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Given the rapid adoption of massive multilingual systems in NLP, there is an increasing need for evaluation in other languages. Thus it is intuitive to feel the need to use Checklist for multilingual models. However, template generation would typically need a native or fluent speaker of the language. It can often be difficult for researchers building massive multilingual systems to find experts fluent in multiple or specific languages. While the open-source Checklist framework provides limited capability of generating multilingual templates, it is not powerful enough to automate the process for different languages without sufficient human supervision. Thus, developing ways to create multilingual Checklists using Checklists in one language easily has immense scope.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilinguality", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "State-of-the-art systems have been known to break down when deployed in the wild because of heavy reliance on static evaluation benchmarks that fail to holistically test the system. Several non-standard forms of evaluation into specific aspects of the models can lead to insights that might otherwise go unnoticed. Human-in-loop processes have been known to aid better explainability, trust, debugging, and improvement of NLP models by combining automation with human-expertise of language use. The Checklist framework introduced a behavioral testing approach for finding bugs in NLP models, which showed that state-of-the-art systems fail on the simplest of capabilities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We presented a case study of using Checklist to debug an English offensive content detection system. The process we utilized was two-staged: First, we employed a human annotator to generate templates for evaluating specific model capabilities. These results were leveraged to find bugs, or capabilities in which the model is not performing as per expectation. The second step was to augment the data generated from these templates and re-train the model. This led to targeted bug-fixing and better performance not just on the test sets created from the same templates, but more generally, on independently created template sets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Using this technique led to not only improved models but also a better understanding of the limitations and capabilities of the model in context of specific requirements. Our findings add to the growing optimism of using human expertise and non-standard evaluation to improve performance, better explainability, and increase trust in NLP systems deployed in real-world uncontrolled usage environments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We also discuss various challenges of employing such a human-in-loop strategy. These include resource requirements, different methods to improve the model, determining the quality of templates and template sets, finding the optimal and representative template set, the difficulty for human subjects to create templates from scratch, and extension of the paradigm to languages other than English. This leads to the conclusion that the process, even though beneficial, leaves many open questions that need to be addressed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We hope that our work further increases attention to the Checklist paradigm and motivates researchers to evaluate and improve black box NLP models using non-standard and explainable humanin-loop evaluation and investigate its challenges. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "a workday is taken as 8 hours", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "educated in English and having understanding equivalent to graduate-level courses in natural language processing and machine learning", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank our colleagues Monojit Choudhary, Kalika Bali, Somak Aditya, Anirudh Srinivasan, and Karthikeyan K for their invaluable inputs during the course of this work. We extend gracious thanks to our five amazing annotators who put a lot of time and effort in understanding the problem, creating templates, and giving us their feedback.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Synthetic and natural noise both break neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Bisk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "6th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yonatan Belinkov and Yonatan Bisk. 2018. Synthetic and natural noise both break neural machine trans- lation. In 6th International Conference on Learn- ing Representations, ICLR 2018, Vancouver, BC, Canada, April 30 -May 3, 2018, Conference Track Proceedings. OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Automated hate speech detection and the problem of offensive language", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Warmsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Macy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 11th International AAAI Conference on Web and Social Media, ICWSM '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "512--515", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Davidson, Dana Warmsley, Michael Macy, and Ingmar Weber. 2017. Automated hate speech detection and the problem of offensive language. In Proceedings of the 11th International AAAI Confer- ence on Web and Social Media, ICWSM '17, pages 512-515.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Are we modeling the task or the annotator? an investigation of annotator bias in natural language understanding datasets", |
|
"authors": [ |
|
{ |
|
"first": "Mor", |
|
"middle": [], |
|
"last": "Geva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1161--1166", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1107" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mor Geva, Yoav Goldberg, and Jonathan Berant. 2019. Are we modeling the task or the annotator? an inves- tigation of annotator bias in natural language under- standing datasets. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 1161-1166, Hong Kong, China. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Hate speech dataset from a white supremacy forum", |
|
"authors": [ |
|
{ |
|
"first": "Ona", |
|
"middle": [], |
|
"last": "De Gibert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naiara", |
|
"middle": [], |
|
"last": "Perez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2nd Workshop on Abusive Language Online (ALW2)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-5102" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ona de Gibert, Naiara Perez, Aitor Garc\u00eda-Pablos, and Montse Cuadros. 2018. Hate speech dataset from a white supremacy forum. In Proceedings of the 2nd Workshop on Abusive Language Online (ALW2), pages 11-20, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Breaking NLI systems with sentences that require simple lexical inferences", |
|
"authors": [ |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Glockner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vered", |
|
"middle": [], |
|
"last": "Shwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "650--655", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-2103" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Max Glockner, Vered Shwartz, and Yoav Goldberg. 2018. Breaking NLI systems with sentences that re- quire simple lexical inferences. In Proceedings of the 56th Annual Meeting of the Association for Com- putational Linguistics (Volume 2: Short Papers), pages 650-655, Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Making the V in VQA matter: Elevating the role of image understanding in visual question answering", |
|
"authors": [ |
|
{ |
|
"first": "Yash", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tejas", |
|
"middle": [], |
|
"last": "Khot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Summers-Stay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6325--6334", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPR.2017.670" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. 2017. Making the V in VQA matter: Elevating the role of image un- derstanding in visual question answering. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pages 6325-6334. IEEE Computer So- ciety.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Annotation artifacts in natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Suchin Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "107--112", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2017" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. 2018. Annotation artifacts in natural lan- guage inference data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 107-112, New Orleans, Louisiana. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Adversarial example generation with syntactically controlled paraphrase networks", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Wieting", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1875--1885", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1170" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Iyyer, John Wieting, Kevin Gimpel, and Luke Zettlemoyer. 2018. Adversarial example generation with syntactically controlled paraphrase networks. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1875-1885, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Adversarial NLI: A new benchmark for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Yixin", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Dinan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4885--4901", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.441" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yixin Nie, Adina Williams, Emily Dinan, Mohit Bansal, Jason Weston, and Douwe Kiela. 2020. Ad- versarial NLI: A new benchmark for natural lan- guage understanding. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 4885-4901, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Dynasent: A dynamic benchmark for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengxuan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atticus", |
|
"middle": [], |
|
"last": "Geiger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2012.15349" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Potts, Zhengxuan Wu, Atticus Geiger, and Douwe Kiela. 2020. Dynasent: A dynamic benchmark for sentiment analysis. arXiv preprint arXiv:2012.15349.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Perturbation sensitivity analysis to detect unintended model biases", |
|
"authors": [ |
|
{ |
|
"first": "Vinodkumar", |
|
"middle": [], |
|
"last": "Prabhakaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hutchinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5740--5745", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1578" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vinodkumar Prabhakaran, Ben Hutchinson, and Mar- garet Mitchell. 2019. Perturbation sensitivity analy- sis to detect unintended model biases. In Proceed- ings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Inter- national Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 5740-5745, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Do imagenet classifiers generalize to imagenet?", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Recht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Roelofs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludwig", |
|
"middle": [], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vaishaal", |
|
"middle": [], |
|
"last": "Shankar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 36th International Conference on Machine Learning, ICML 2019", |
|
"volume": "97", |
|
"issue": "", |
|
"pages": "5389--5400", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. 2019. Do imagenet classi- fiers generalize to imagenet? In Proceedings of the 36th International Conference on Machine Learn- ing, ICML 2019, 9-15 June 2019, Long Beach, Cali- fornia, USA, volume 97 of Proceedings of Machine Learning Research, pages 5389-5400. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Are red roses red? evaluating consistency of question-answering models", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6174--6184", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1621" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Carlos Guestrin, and Sameer Singh. 2019. Are red roses red? evaluating con- sistency of question-answering models. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6174-6184, Florence, Italy. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "why should I trust you?\": Explaining the predictions of any classifier", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"T\u00falio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1135--1144", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2939672.2939778" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco T\u00falio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. \"why should I trust you?\": Explain- ing the predictions of any classifier. In Proceed- ings of the 22nd ACM SIGKDD International Con- ference on Knowledge Discovery and Data Mining, San Francisco, CA, USA, August 13-17, 2016, pages 1135-1144. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Anchors: High-precision modelagnostic explanations", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"T\u00falio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1527--1535", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco T\u00falio Ribeiro, Sameer Singh, and Carlos Guestrin. 2018a. Anchors: High-precision model- agnostic explanations. In Proceedings of the Thirty- Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Arti- ficial Intelligence (IAAI-18), and the 8th AAAI Sym- posium on Educational Advances in Artificial Intel- ligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, pages 1527-1535. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Semantically equivalent adversarial rules for debugging NLP models", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "856--865", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1079" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2018b. Semantically equivalent adversar- ial rules for debugging NLP models. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 856-865, Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Beyond accuracy: Behavioral testing of NLP models with CheckList", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tongshuang", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4902--4912", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.442" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Tongshuang Wu, Carlos Guestrin, and Sameer Singh. 2020. Beyond accuracy: Be- havioral testing of NLP models with CheckList. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4902- 4912, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Models in the wild: On corruption robustness of neural nlp systems", |
|
"authors": [ |
|
{ |
|
"first": "Barbara", |
|
"middle": [], |
|
"last": "Rychalska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dominika", |
|
"middle": [], |
|
"last": "Basaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alicja", |
|
"middle": [], |
|
"last": "Gosiewska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Przemys\u0142aw", |
|
"middle": [], |
|
"last": "Biecek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Neural Information Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "235--247", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barbara Rychalska, Dominika Basaj, Alicja Gosiewska, and Przemys\u0142aw Biecek. 2019. Models in the wild: On corruption robustness of neural nlp systems. In International Conference on Neural Information Processing, pages 235-247. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Performance impact caused by hidden bias of training data for recognizing textual entailment", |
|
"authors": [ |
|
{ |
|
"first": "Masatoshi", |
|
"middle": [], |
|
"last": "Tsuchiya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masatoshi Tsuchiya. 2018. Performance impact caused by hidden bias of training data for recog- nizing textual entailment. In Proceedings of the Eleventh International Conference on Language Re- sources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"num": null, |
|
"text": "One Typo: Introducing one random typo in the example by flipping adjacent characters. Example: fcuk. (b) Two Typo: Introducing one random typo in the example by flipping adjacent characters. Example: All mne ar epigs. (c) Punctuation Insertion: Random inser-tion of punctuation marks in the example. Example: f*ck.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"content": "<table><tr><td>Beiber is not an ethical person, Gandhi</td></tr><tr><td>was not honest, etc.</td></tr><tr><td>(b) Negation of Positive Characterization</td></tr><tr><td>of Groups: Negating positive characteri-</td></tr><tr><td>zation of groups. Also includes content</td></tr><tr><td>that seeks or suggest negation of rights</td></tr><tr><td>or equality to specific groups. Exam-</td></tr><tr><td>ple: Females are not smart, Muslims are</td></tr><tr><td>not honest, Policemen cannot be good,</td></tr><tr><td>Women should not be allowed to go to</td></tr><tr><td>college, etc.</td></tr><tr><td>(c) Negation of Negative Characteriza-</td></tr><tr><td>tion of Individuals: Negating negative</td></tr><tr><td>characterization of individuals. Example:</td></tr><tr><td>Bin Laden was not bad, Hitler was not</td></tr><tr><td>brutal, etc.</td></tr><tr><td>(a) Simple Characterization of Individu-</td></tr><tr><td>als: Characterizing individuals. Exam-</td></tr><tr><td>ple: Trump is an idiot, is Joe Biden</td></tr><tr><td>racist?, Kevin Spacey gay, Fat Meghan</td></tr><tr><td>Markle, Bin Laden was amazing, etc.</td></tr><tr><td>(b) Simple Characterization of Groups:</td></tr><tr><td>Characterizing groups Example: Mexi-</td></tr><tr><td>cans are lazy, Are all whites racist, Jews</td></tr><tr><td>are greedy, females belong in the kitchen,</td></tr><tr><td>all men are pigs, people from middle east</td></tr><tr><td>stink, etc.</td></tr><tr><td>2. Negation: Negation of characteristics tar-</td></tr><tr><td>geted towards individuals or groups. Also in-</td></tr><tr><td>cludes negation of positive and negative char-</td></tr><tr><td>acterizations of individuals or groups includ-</td></tr><tr><td>ing comments on sexual orientation, ethics,</td></tr><tr><td>morality, habits, physical appearance, or other</td></tr><tr><td>characteristics.</td></tr><tr><td>(a) Negation of Positive characterization</td></tr><tr><td>of Individuals: Negating a positive char-</td></tr><tr><td>acterization of any individual. Also in-</td></tr><tr><td>cludes content that seeks or suggest nega-</td></tr><tr><td>tion of rights or equality to specific enti-</td></tr><tr><td>ties. Example: Trump is not smart, Justin</td></tr></table>", |
|
"text": "To test if the system is robust to minor perturbations like typos or punctuation that are consistent with intentional or unintentional usage in writing offensive content.", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"4\">Model Precision Recall F1 Score</td></tr><tr><td>Base</td><td>79.75</td><td>80.13</td><td>79.94</td></tr><tr><td>Aug-1</td><td>80.20</td><td>79.30</td><td>79.75</td></tr><tr><td>Aug-2</td><td>79.27</td><td>80.13</td><td>79.70</td></tr><tr><td>Aug-3</td><td>79.50</td><td>80.13</td><td>79.81</td></tr><tr><td>Aug-4</td><td>80.56</td><td>80.25</td><td>80.40</td></tr><tr><td>Aug-5</td><td>80.14</td><td>80.13</td><td>80.14</td></tr></table>", |
|
"text": "Created by three human judges with majority voting", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Metrics on static benchmark test set", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table><tr><td/><td>1.a</td><td>1.b</td><td>2.a</td><td>2.b</td><td>2.c</td><td>2.d</td><td>3</td><td>4</td><td>5</td><td>6.a</td><td>6.b</td><td>6.c</td></tr><tr><td>Aug-1</td><td>22.21</td><td>0.05</td><td>0.03</td><td>0</td><td>1.07</td><td>0.02</td><td>0</td><td>0</td><td>0</td><td>3.54</td><td>4.23</td><td>1.08</td></tr><tr><td>Total Examples</td><td>10k</td><td>5.5k</td><td>3k</td><td>6k</td><td>7.5k</td><td>5.5k</td><td colspan=\"2\">4.3k 1k</td><td colspan=\"4\">2.7k 12.3k 12.3k 12.7k</td></tr></table>", |
|
"text": "Base 47.37 22.84 32.13 36.73 37.77 46.16 18.70 6 44.70 46.62 59.11 42.61", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Model Training Data</td><td>TS-1</td><td>TS-2</td><td>TS-3</td><td>TS-4</td><td>TS-5</td></tr><tr><td>Base</td><td colspan=\"2\">Base training data (BTD) 38.0 (0)</td><td>20.05 (0)</td><td>28.24 (0)</td><td>26.35 (0)</td><td>34.14 (0)</td></tr><tr><td colspan=\"2\">Aug-1 BTD + data from TS-1</td><td>4.0* (-34)</td><td>12.67 (-7.38)</td><td>5.66 (-22.58)</td><td>18.83 (-7.52)</td><td>13.67 (-20.47)</td></tr><tr><td colspan=\"2\">Aug-2 BTD + data from TS-2</td><td colspan=\"3\">31.73 (-6.27) 0.01* (-20.04) 19.34 (-8.9)</td><td colspan=\"2\">13.28 (-13.28) 26.97 (-7.17)</td></tr><tr><td colspan=\"2\">Aug-3 BTD + data from TS-3</td><td colspan=\"2\">30.21 (-7.79) 15.53 (-4.52)</td><td colspan=\"2\">0.01* (-28.23) 26.11 (-0.24)</td><td>23.57 (-10.57)</td></tr><tr><td colspan=\"2\">Aug-4 BTD + data from TS-4</td><td colspan=\"2\">34.29 (-3.71) 9.61 (-10.44)</td><td>24.98 (-3.26)</td><td>0* (-26.35)</td><td>30.29 (-3.85)</td></tr><tr><td colspan=\"2\">Aug-5 BTD + data from TS-5</td><td colspan=\"2\">32.18 (-5.82) 15.74 (-4.31)</td><td>19.34 (-8.9)</td><td>19.35 (-7.0)</td><td>0.01* (-34.13)</td></tr></table>", |
|
"text": "Failure Rates (%) of Base and Improved models for Test holdout set for different capabilities 3.1", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Average (weighted across different capabilities by number of examples in each) failure rates (%) of different models on independently created template sets. Figures in bracket show change in failure rate from the failure rate of base model tested on the particular template set (* refers to tested on the test holdout set such that the testing examples are disjoint from training data of the augmented model but come from the same template set)", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"5\">Model Characterization Negation Violence Unsafe</td><td>Robustness</td></tr><tr><td>Base</td><td>21.21</td><td>18.27</td><td>20.98</td><td/><td>35.26</td></tr><tr><td colspan=\"2\">Aug-2 16.88</td><td>3.90</td><td>12.43</td><td/><td>24.22</td></tr><tr><td colspan=\"2\">Aug-3 0*</td><td>0*</td><td>0*</td><td>Annotator did not create Template</td><td>0.01*</td></tr><tr><td colspan=\"2\">Aug-4 23.56</td><td>9.15</td><td>15.69</td><td/><td>32.04</td></tr><tr><td colspan=\"2\">Aug-5 15.03</td><td>7.24</td><td>17.35</td><td/><td>22.88</td></tr></table>", |
|
"text": "Failure Rates (%) of grouped capabilities on Template Set -2. (* refers to tested on the test holdout set such that the testing examples are disjoint from training data of the augmented model but come from the same template set)", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"6\">Model Characterization Negation Violence Unsafe Robustness</td></tr><tr><td>Base</td><td>12.99</td><td>21.01</td><td>32.98</td><td>56.03</td><td>32.63</td></tr><tr><td colspan=\"2\">Aug-2 8.50</td><td>12.56</td><td>9.65</td><td>33.12</td><td>13.68</td></tr><tr><td colspan=\"2\">Aug-3 13.46</td><td>17.64</td><td>12.73</td><td>50.35</td><td>30.38</td></tr><tr><td colspan=\"2\">Aug-4 0*</td><td>0.01*</td><td>0*</td><td>0*</td><td>0*</td></tr><tr><td colspan=\"2\">Aug-5 9.43</td><td>11.89</td><td>15.28</td><td>43.5</td><td>21.99</td></tr></table>", |
|
"text": "Failure Rates (%) of grouped capabilities on Template Set -3. (* refers to tested on the test holdout set such that the testing examples are disjoint from training data of the augmented model but come from the same template set)", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF9": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"5\">Model Characterization Negation Violence Unsafe</td><td>Robustness</td></tr><tr><td>Base</td><td>14.41</td><td>24.21</td><td>98.48</td><td/><td>37.29</td></tr><tr><td colspan=\"2\">Aug-2 6.57</td><td>9.58</td><td>97.79</td><td/><td>29.07</td></tr><tr><td colspan=\"2\">Aug-3 3.85</td><td>3.70</td><td>96.98</td><td>Annotator did not create Template</td><td>25.07</td></tr><tr><td colspan=\"2\">Aug-4 8.32</td><td>16.01</td><td>72.22</td><td/><td>33.35</td></tr><tr><td colspan=\"2\">Aug-5 0*</td><td>0*</td><td>0*</td><td/><td>0.01*</td></tr></table>", |
|
"text": "Failure Rates (%) of grouped capabilities on Template Set -4. (* refers to tested on the test holdout set such that the testing examples are disjoint from training data of the augmented model but come from the same template set)", |
|
"type_str": "table", |
|
"html": null |
|
}, |
|
"TABREF10": { |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Failure Rates (%) of grouped capabilities on Template Set -5. (* refers to tested on the test holdout set such that the testing examples are disjoint from training data of the augmented model but come from the same template set)", |
|
"type_str": "table", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |