|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:52:35.531513Z" |
|
}, |
|
"title": "Challenges in Applying Explainability Methods to Improve the Fairness of NLP Models", |
|
"authors": [ |
|
{ |
|
"first": "Esma", |
|
"middle": [], |
|
"last": "Balk\u0131r", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Research Council Canada Ottawa", |
|
"location": { |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Research Council Canada Ottawa", |
|
"location": { |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Isar", |
|
"middle": [], |
|
"last": "Nejadgholi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Research Council Canada Ottawa", |
|
"location": { |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Fraser", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Research Council Canada Ottawa", |
|
"location": { |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Motivations for methods in explainable artificial intelligence (XAI) often include detecting, quantifying and mitigating bias, and contributing to making machine learning models fairer. However, exactly how an XAI method can help in combating biases is often left unspecified. In this paper, we briefly review trends in explainability and fairness in NLP research, identify the current practices in which explainability methods are applied to detect and mitigate bias, and investigate the barriers preventing XAI methods from being used more widely in tackling fairness issues.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Motivations for methods in explainable artificial intelligence (XAI) often include detecting, quantifying and mitigating bias, and contributing to making machine learning models fairer. However, exactly how an XAI method can help in combating biases is often left unspecified. In this paper, we briefly review trends in explainability and fairness in NLP research, identify the current practices in which explainability methods are applied to detect and mitigate bias, and investigate the barriers preventing XAI methods from being used more widely in tackling fairness issues.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Trends in Natural Language Processing (NLP) mirror those in Machine Learning (ML): breakthroughs in deep neural network architectures, pretraining and fine-tuning methods, and a steady increase in the number of parameters led to impressive performance improvements for a wide variety of NLP tasks. However, these successes have been shadowed by the repeated discoveries that a high accuracy on the held-out test set does not always mean that the model is performing satisfactorily on other important criteria such as fairness, robustness and safety. These discoveries that models are adversarially manipulable (Zhang et al., 2020a) , show biases against underprivileged groups , and leak sensitive user information (Carlini et al., 2021) inspired a plethora of declarations on Responsible/Ethical AI (Morley et al., 2021) . Two of the common principles espoused in these documents are fairness and transparency.", |
|
"cite_spans": [ |
|
{ |
|
"start": 610, |
|
"end": 631, |
|
"text": "(Zhang et al., 2020a)", |
|
"ref_id": "BIBREF94" |
|
}, |
|
{ |
|
"start": 715, |
|
"end": 737, |
|
"text": "(Carlini et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 800, |
|
"end": 821, |
|
"text": "(Morley et al., 2021)", |
|
"ref_id": "BIBREF58" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Failures in fairness of models is often attributed, among other things, to the lack of transparency of modern AI models. The implicit argument is that, if biased predictions are due to faulty reasoning learned from biased data, then we need transparency in order to detect and understand this faulty reasoning. Hence, one approach to solving these problems is to develop methods that can peek inside the black-box, provide insights into the internal workings of the model, and identify whether the model is right for the right reasons.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As a result, ensuring the fairness of AI systems is frequently cited as one of the main motivations behind XAI research (Doshi-Velez and Kim, 2017; Das and Rad, 2020; . However, it is not always clear how these methods can be applied in order to achieve fairer, less biased models. In this paper, we briefly summarize some XAI methods that are common in NLP research, the conceptualization, sources and metrics for unintended biases in NLP models, and some works that apply XAI methods to identify or mitigate these biases. Our review of the literature in this intersection reveals that applications of XAI methods to fairness and bias issues in NLP are surprisingly few, concentrated on a limited number of tasks, and often applied only to a few examples in order to illustrate the particular bias being studied. Based on our findings, we discuss some barriers to more widespread and effective application of XAI methods for debiasing NLP models, and some research directions to bridge the gap between these two areas.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 147, |
|
"text": "(Doshi-Velez and Kim, 2017;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 166, |
|
"text": "Das and Rad, 2020;", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With the success and widespread adaptation of black-box models for machine learning tasks, increasing research effort has been devoted to developing methods that might give humancomprehensible explanations for the behaviour of these models, helping developers and end-users to understand the reasoning behind the decisions of the model. Broadly speaking, explainability methods try to pinpoint the causes of a single prediction, a set of predictions, or all predictions of a model by identifying parts of the input, the model or the training data that have the most influence on the model outcome. The line dividing XAI methods, and methods that are developed more generally for understanding, analysis and evaluation of NLP methods beyond the standard accuracy metrics is not always clear cut. Many popular approaches such as probes (Hewitt and Liang, 2019; Voita and Titov, 2020) , contrast sets and checklists (Ribeiro et al., 2020) share many of their core motivations with XAI methods. Here, we present some of the most prominent works in XAI, and refer the reader to the survey by Danilevsky et al. (2020) for a more extensive overview of the field. We consider a method as an XAI method if the authors have framed it as such in the original presentation, and do not include others in our analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 834, |
|
"end": 858, |
|
"text": "(Hewitt and Liang, 2019;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 859, |
|
"end": 881, |
|
"text": "Voita and Titov, 2020)", |
|
"ref_id": "BIBREF84" |
|
}, |
|
{ |
|
"start": 1087, |
|
"end": 1111, |
|
"text": "Danilevsky et al. (2020)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "A common categorization of explainability methods is whether they provide local or global explanations, and whether they are self-explaining or post-hoc (Guidotti et al., 2018; Adadi and Berrada, 2018) . The first distinction captures whether the explanations are given for individual instances (local) or explain the model behaviour on any input (global). Due to the complex nature of the data and the tasks common in NLP, the bulk of the XAI methods developed for or applicable to NLP models are local rather than global (Danilevsky et al., 2020) . The second distinction is related to how the explanations are generated. In self-explaining methods, the process of generating explanations is integrated into, or at least reliant on the internal structure of the model or the process of computing the model outcome. Because of this, self-explaining methods are often specific to the type of the model. On the other hand, post-hoc or model-agnostic methods only assume access to the input-output behaviour of the model, and construct explanations based on how changes to the different components of the prediction pipeline affect the outputs. Below, we outline some of the representative explainability methods used in NLP and categorize them along the two dimensions in Table 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 176, |
|
"text": "(Guidotti et al., 2018;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 201, |
|
"text": "Adadi and Berrada, 2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 523, |
|
"end": 548, |
|
"text": "(Danilevsky et al., 2020)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1271, |
|
"end": 1278, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Feature attribution methods, also referred to as feature importance or saliency maps, aim to determine the relative importance of each token in an input text for a given model prediction. The underlying assumption in each of these methods is that the more important a token is for a prediction, the more the output should change when this token is removed or changed. One way to estimate this is through the gradients of the output with respect to each input token as done by Simonyan et al. (2014) . Other methods have been developed to address some of the issues with the original approach such as local consistency (Sundararajan et al., 2017; Smilkov et al., 2017; Selvaraju et al., 2017; Shrikumar et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 476, |
|
"end": 498, |
|
"text": "Simonyan et al. (2014)", |
|
"ref_id": "BIBREF79" |
|
}, |
|
{ |
|
"start": 618, |
|
"end": 645, |
|
"text": "(Sundararajan et al., 2017;", |
|
"ref_id": "BIBREF82" |
|
}, |
|
{ |
|
"start": 646, |
|
"end": 667, |
|
"text": "Smilkov et al., 2017;", |
|
"ref_id": "BIBREF80" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 691, |
|
"text": "Selvaraju et al., 2017;", |
|
"ref_id": "BIBREF76" |
|
}, |
|
{ |
|
"start": 692, |
|
"end": 715, |
|
"text": "Shrikumar et al., 2017)", |
|
"ref_id": "BIBREF78" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
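To make the gradient-based feature attribution idea above concrete, here is a minimal sketch in PyTorch: a toy mean-pooled bag-of-embeddings classifier (the vocabulary size, dimensions and token ids are invented for illustration) whose per-token saliency is computed as gradient-times-input. It is in the spirit of Simonyan et al. (2014) but does not reproduce any specific published implementation.

```python
# Hedged sketch: gradient-based token saliency for a toy classifier.
# Model sizes and token ids are illustrative assumptions, not from the paper.
import torch
import torch.nn as nn

torch.manual_seed(0)

vocab_size, emb_dim, n_classes = 100, 16, 2
embedding = nn.Embedding(vocab_size, emb_dim)
classifier = nn.Linear(emb_dim, n_classes)

token_ids = torch.tensor([[5, 17, 42, 8]])   # one toy "sentence" of token ids
embs = embedding(token_ids)                  # shape: (1, seq_len, emb_dim)
embs.retain_grad()                           # keep the gradient w.r.t. the embeddings

logits = classifier(embs.mean(dim=1))        # mean-pooled bag-of-embeddings classifier
pred_class = logits.argmax(dim=-1).item()
logits[0, pred_class].backward()             # gradient of the predicted-class score

# "Gradient x input" saliency, reduced to a single score per token.
saliency = (embs.grad * embs).sum(dim=-1).squeeze(0).abs()
for tok, score in zip(token_ids[0].tolist(), saliency.tolist()):
    print(f"token {tok}: saliency {score:.4f}")
```

With a real model, the same pattern applies: take the gradient of the predicted-class score with respect to the input embeddings and reduce it to one score per token.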
|
{ |
|
"text": "Rather than estimating the effect of perturbations through gradients, an alternative approach is to perturb the input text directly and observe its effects on the model outcome. Two of the most common methods in this class are LIME (Ribeiro et al., 2016) and SHAP (Lundberg and Lee, 2017). LIME generates perturbations by dropping subsets of tokens from the input text, and then fitting a linear classifier on these local perturbations. SHAP is inspired by Shapely values from cooperative game theory, and calculates feature importance as the fair division of a \"payoff\" from a game where the features cooperate to obtain the given model outcome. AllenNLP Interpret toolkit provides an implementation for both types of feature attribution methods, gradient based and input perturbation based, for six core NLP tasks, including text classification, masked language modeling, named entity recognition, and others.", |
|
"cite_spans": [ |
|
{ |
|
"start": 227, |
|
"end": 254, |
|
"text": "LIME (Ribeiro et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
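A minimal sketch of the perturbation-based idea behind LIME, under simplifying assumptions: the black_box function below is a toy stand-in for a real classifier, and the surrogate fit omits LIME's proximity weighting and sparsity constraints.

```python
# Hedged sketch of a LIME-style explanation; not the official LIME implementation.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
tokens = ["the", "service", "was", "terribly", "slow"]

def black_box(token_list):
    """Toy 'model': probability of the negative class rises with certain words."""
    score = 0.6 * ("terribly" in token_list) + 0.3 * ("slow" in token_list)
    return min(score + 0.05, 1.0)

# Perturb by randomly dropping tokens; record which tokens were kept.
n_samples = 500
masks = rng.integers(0, 2, size=(n_samples, len(tokens)))
preds = np.array([
    black_box([t for t, keep in zip(tokens, mask) if keep]) for mask in masks
])

# Fit a local linear surrogate: its coefficients act as token importance scores.
surrogate = Ridge(alpha=1.0).fit(masks, preds)
for tok, weight in zip(tokens, surrogate.coef_):
    print(f"{tok:>10s}: {weight:+.3f}")
```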
|
{ |
|
"text": "A third way to obtain feature attribution maps in architectures that use an attention mechanism (Bahdanau et al., 2015) is to look at the relative attention scores for each token (Xu et al., 2015; Choi et al., 2016) . Whether this approach provides valid explanations has been subject to heated debate (Jain and Wallace, 2019; Wiegreffe and Pinter, 2019) , however as Galassi et al. (2020) notes, the debate has mostly been centered around the use of attention scores as local explanations. There has also been some works that use attention scores for providing global explanations based on the syntactic structures that the model attends to (Clark et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 119, |
|
"text": "(Bahdanau et al., 2015)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 196, |
|
"text": "(Xu et al., 2015;", |
|
"ref_id": "BIBREF90" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 215, |
|
"text": "Choi et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 326, |
|
"text": "(Jain and Wallace, 2019;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 354, |
|
"text": "Wiegreffe and Pinter, 2019)", |
|
"ref_id": "BIBREF88" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 389, |
|
"text": "Galassi et al. (2020)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 642, |
|
"end": 662, |
|
"text": "(Clark et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
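As a rough illustration of reading attention scores as token importance, the sketch below computes scaled dot-product attention weights over random stand-in query and key vectors and averages the attention each token receives; a real analysis would instead extract these weights from the attention heads of a trained model.

```python
# Hedged sketch: attention weights as token-importance scores.
# The query/key vectors are random stand-ins for learned representations.
import numpy as np

rng = np.random.default_rng(0)
tokens = ["I", "really", "dislike", "mondays"]
d = 8

queries = rng.normal(size=(len(tokens), d))
keys = rng.normal(size=(len(tokens), d))

scores = queries @ keys.T / np.sqrt(d)                                  # scaled dot products
weights = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)   # row-wise softmax

# One simple aggregation: how much attention each token receives on average.
received = weights.mean(axis=0)
for tok, w in zip(tokens, received):
    print(f"{tok:>8s}: {w:.3f}")
```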
|
{ |
|
"text": "Extractive rationales (DeYoung et al., 2020) are snippets of the input text that trigger the original prediction. They are similar in spirit to feature attribution methods, however in rationales the attribution is usually binary rather than a real-valued score, and continuous subsets of the text are chosen rather than each token being treated individually. Rationales can also be obtained from humans as explanations of human annotations rather than the model decisions, and used as an additional signal to guide the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Counterfactual explanations are new instances that are obtained by applying minimal changes to an input instance in order to change the model output. Counterfactuals are inspired by notions in causality, and aim to answer the question: \"What would need to change for the outcome to be different?\" Two examples of counterfactual explanations in NLP are Polyjuice (Wu et al., 2021) and MiCE (Ross et al., 2021) . Polyjuice is model agnostic, and consists of a generative model trained on existing, human generated counterfactual data sets. It also allows finer control over the types of counterfactuals by allowing the user to choose which parts of the input to perturb, and how to perturb them with control codes such as \"replace\" or \"negation\". MiCE uses model gradients to iteratively choose and mask the important tokens, and a generative model to change the chosen tokens so that the end prediction is flipped.", |
|
"cite_spans": [ |
|
{ |
|
"start": 362, |
|
"end": 379, |
|
"text": "(Wu et al., 2021)", |
|
"ref_id": "BIBREF89" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 408, |
|
"text": "(Ross et al., 2021)", |
|
"ref_id": "BIBREF72" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
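The following toy sketch illustrates the counterfactual idea in its simplest form: a brute-force search over single-token substitutions that flips a rule-based stand-in classifier. Polyjuice and MiCE instead rely on trained generative models to propose fluent edits; the word lists and classifier here are invented for illustration only.

```python
# Hedged sketch of counterfactual search by single-token substitution.
NEGATIVE_WORDS = {"terrible", "awful", "hate"}
SUBSTITUTES = {"terrible": ["great", "fine"], "awful": ["decent"], "hate": ["like", "love"]}

def toy_classifier(tokens):
    """Predicts 'negative' if any negative word is present, else 'positive'."""
    return "negative" if NEGATIVE_WORDS & set(tokens) else "positive"

def find_counterfactual(tokens):
    """Return the first minimally edited input that flips the prediction, if any."""
    original = toy_classifier(tokens)
    for i, tok in enumerate(tokens):
        for sub in SUBSTITUTES.get(tok, []):
            edited = tokens[:i] + [sub] + tokens[i + 1:]
            if toy_classifier(edited) != original:
                return edited, toy_classifier(edited)
    return None, original

sentence = ["the", "food", "was", "terrible"]
cf, new_label = find_counterfactual(sentence)
print("original:", sentence, "->", toy_classifier(sentence))
print("counterfactual:", cf, "->", new_label)
```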
|
{ |
|
"text": "There are also methods that try to pinpoint which examples in the training data have the most influence on the prediction. The most common approach for this is Influence Functions (Koh and Liang, 2017; Han et al., 2020) , where the goal is to efficiently estimate how much removing an example from the data set and retraining the model would change the prediction on a particular input. An alternative is Representer Point Selection (Yeh et al., 2018) , which applies to a more limited set of architectures, and aims to express the logits of an input as a weighted sum of all the training data points.", |
|
"cite_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 219, |
|
"text": "Han et al., 2020)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 451, |
|
"text": "(Yeh et al., 2018)", |
|
"ref_id": "BIBREF92" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
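To clarify what training-data influence estimates, the sketch below computes the brute-force leave-one-out quantity that influence functions are designed to approximate without retraining; the tiny corpus and scikit-learn pipeline are illustrative assumptions, not the estimator of Koh and Liang (2017).

```python
# Hedged sketch: brute-force leave-one-out influence on a toy text classifier.
# Influence functions approximate this quantity efficiently; here we simply retrain.
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression

train_texts = ["good movie", "great film", "terrible movie",
               "awful film", "great acting", "awful plot"]
train_labels = np.array([1, 1, 0, 0, 1, 0])
test_text = ["terrible acting"]

vec = CountVectorizer().fit(train_texts)
X, X_test = vec.transform(train_texts), vec.transform(test_text)

def prob_positive(train_idx):
    """Retrain on a subset of the data and score the test example."""
    model = LogisticRegression().fit(X[train_idx], train_labels[train_idx])
    return model.predict_proba(X_test)[0, 1]

full_prob = prob_positive(np.arange(len(train_texts)))
for i, text in enumerate(train_texts):
    loo_idx = np.array([j for j in range(len(train_texts)) if j != i])
    influence = prob_positive(loo_idx) - full_prob   # shift caused by removing example i
    print(f"{text!r}: {influence:+.3f}")
```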
|
{ |
|
"text": "Some explainability methods are designed to provide global explanations using higher level, semantic concepts. Feder et al. (2021b) use counterfactual language models to provide causal explanations based on high-level concepts. Their method contrasts the original model representations with alternative pre-trained representations that are adversarially trained not to capture the chosen highlevel concept, so that the total causal effect of the concept on the classification decisions can be estimated. adapt Testing Concept Activation Vector (TCAV) method of , originally developed for computer vision, to explain the generalization abilities of a hate speech classifier. In their approach, the concepts are defined through a small set of human chosen examples, and the method quantifies how strongly the concept is associated with a given label.", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 131, |
|
"text": "Feder et al. (2021b)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Finally, some methods produce explanations in the form of rules. One method in this category is Anchors (Ribeiro et al., 2018a), where the model searches for a set of tokens in a particular input text that predicts the given outcome with high precision. Although Anchors is a local explainability method in that it gives explanations on individual input instances, the generated explanations are globally applicable. SEAR (Ribeiro et al., 2018b), a global explainability method, finds universal replacement rules that, if applied to an input, adversarially change the prediction while keeping the semantics of the input the same.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Natural Language Processing", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Unintended biases in NLP is a complex and multifaceted issue that spans various undesirable model behaviours that cause allocational and representational harms to certain demographic groups (Blodgett et al., 2020) . When the demographic group is already marginalized and underprivileged in society, biases in NLP models can further contribute to the marginalization and the unfair allocation of resources. Examples include performance disparities between standard and African American English (Blodgett and O'Connor, 2017), stereotypical associations between gendered pronouns and occupations in coreference resolution (Rudinger et al., 2018) and machine translation (Stanovsky et al., 2019) , and false positives in hate speech detection on innocuous tweets mentioning demographic attributes (R\u00f6ttger et al., 2021) . In this section, we review some of the most popular methods and metrics to identify such biases. For a more comprehensive coverage, see recent surveys by Mehrabi et al. (2021) and Caton and Haas (2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 213, |
|
"text": "(Blodgett et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 619, |
|
"end": 642, |
|
"text": "(Rudinger et al., 2018)", |
|
"ref_id": "BIBREF74" |
|
}, |
|
{ |
|
"start": 667, |
|
"end": 691, |
|
"text": "(Stanovsky et al., 2019)", |
|
"ref_id": "BIBREF81" |
|
}, |
|
{ |
|
"start": 793, |
|
"end": 815, |
|
"text": "(R\u00f6ttger et al., 2021)", |
|
"ref_id": "BIBREF73" |
|
}, |
|
{ |
|
"start": 972, |
|
"end": 993, |
|
"text": "Mehrabi et al. (2021)", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 998, |
|
"end": 1019, |
|
"text": "Caton and Haas (2020)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness and Bias in NLP Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Most works in ML fairness literature assume that biases in machine learning models originate from misrepresentations in training datasets and merely reflect the societal biases. However, as Hooker (2021) explains, design choices can amplify the societal biases, and automated data processing can lead to systematic un-precedented harms. Shah et al. (2020) identify five sources for bias in NLP models. Selection bias and label bias are biases that originate in the training data. The former refers to biases that are created when choosing which data points to annotate, and includes underrepresentation of some demographic groups as well as misrepresentation due to spurious correlations. The latter refers to biases introduced due to the annotation process, such as when annotators are less familiar with or biased against text generated by certain groups, causing more annotation errors for some groups than others. Model bias are biases that are due to model structure, and are responsible for the over-amplification of discrepancies that are observed in training data. Semantic bias refers to biases introduced from the pre-trained representations, and include representational harms such as stereotypical associations. Finally, bias in research design covers the larger issues of uneven allocation of research efforts across different groups, dialects, languages and geographic areas.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness and Bias in NLP Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Research in fair ML has developed a number of metrics to quantify the biases in an ML model. These metrics are usually classified as group fairness metrics and individual fairness metrics (Castelnovo et al., 2022; Czarnowska et al., 2021) . Group fairness metrics focus on quantifying the performance disparity between different demographic groups. Some examples are demographic parity, which measures the difference in the positive prediction rates across groups, predictive parity, which measures the difference in precision across groups, and equality of odds, which measures the differences between false positive and false negative rates across groups. Individual fairness metrics are based on the idea that the model should behave the same for similar examples re-gardless of the value of a protected attribute. A refinement to this approach is counterfactual fairness, where the criteria for fairness is that the model decision remains the same for a given individual in a counterfactual world where that individual belonged to a different demographic group. In NLP, this notion often appears as counterfactual token fairness (Garg et al., 2019) , and is operationalized through test suites that include variations of the same text where some tokens associated with certain social groups are replaced with others, and the bias of the model is measured by the performance disparity between the pairs (Kiritchenko and Mohammad, 2018; .", |
|
"cite_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 213, |
|
"text": "(Castelnovo et al., 2022;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 238, |
|
"text": "Czarnowska et al., 2021)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1133, |
|
"end": 1152, |
|
"text": "(Garg et al., 2019)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1406, |
|
"end": 1438, |
|
"text": "(Kiritchenko and Mohammad, 2018;", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness and Bias in NLP Models", |
|
"sec_num": "3" |
|
}, |
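A minimal sketch of the group fairness metrics and the counterfactual token fairness idea described above, computed on invented toy predictions and a stand-in scoring function; it is meant only to show how the gaps are measured, not to reflect any benchmark or dataset.

```python
# Hedged sketch: group fairness gaps and a counterfactual token fairness check on toy data.
import numpy as np

y_true = np.array([1, 0, 1, 0, 1, 0, 1, 0])                 # toy gold labels
y_pred = np.array([1, 0, 1, 1, 0, 0, 1, 1])                 # toy model predictions
group = np.array(["a", "a", "a", "a", "b", "b", "b", "b"])  # toy group membership

def rates(g):
    mask = group == g
    yt, yp = y_true[mask], y_pred[mask]
    return {
        "positive_rate": yp.mean(),                 # compared across groups for demographic parity
        "precision": yt[yp == 1].mean(),            # compared across groups for predictive parity
        "false_positive_rate": yp[yt == 0].mean(),  # compared across groups for equality of odds
        "false_negative_rate": 1 - yp[yt == 1].mean(),
    }

rates_a, rates_b = rates("a"), rates("b")
for metric in rates_a:
    print(f"{metric}: gap = {abs(rates_a[metric] - rates_b[metric]):.2f}")

# Counterfactual token fairness: compare scores on text pairs that differ only in an
# identity term; toy_score is a stand-in for a trained classifier's probability output.
def toy_score(text):
    return 0.9 if "gay" in text else 0.2

pairs = [("I am a gay person", "I am a straight person")]
print("counterfactual gap:", max(abs(toy_score(a) - toy_score(b)) for a, b in pairs))
```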
|
{ |
|
"text": "Both group fairness metrics and individual fairness metrics are instances of outcome fairness: whether a model is fair is determined solely on the outcomes with respect to various groups, regardless of how the algorithm produced those observed outcomes. 1 There is a complementary notion called procedural fairness that is often considered in organizational settings (Blader and Tyler, 2003) , which aims to capture whether the processes that were followed to obtain the outcome are fair. In ML, this translates to whether the model's internal reasoning process is fair to different groups or individuals (Grgi\u0107-Hla\u010da et al., 2018; Morse et al., 2021) . For example, outcome fairness for a resume sorting system might be implemented as ensuring that the model has the same acceptance rates or the same precision and recall for groups defined by race, gender, or other demographic attributes. A procedural fairness approach, on the other hand, might aim to ensure that the decision making process of the system only relies on skill-related features, and not features that are strongly associated with demographic attributes, such as names and pronouns. The distinction between procedural and outcome fairness relates to different kinds of discrimination outlined in anti-discrimination laws, namely disparate treatment and disparate impact (Barocas and Selbst, 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 391, |
|
"text": "(Blader and Tyler, 2003)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 631, |
|
"text": "(Grgi\u0107-Hla\u010da et al., 2018;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 632, |
|
"end": 651, |
|
"text": "Morse et al., 2021)", |
|
"ref_id": "BIBREF59" |
|
}, |
|
{ |
|
"start": 1339, |
|
"end": 1365, |
|
"text": "(Barocas and Selbst, 2016)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness and Bias in NLP Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Fairness metrics have originally been developed for applications where the social group membership is known, for example in healthcare related tasks. An issue with applying these to NLP tasks is that either the demographic information is not available and needs to be estimated, or some auxiliary signal, such as the mention of a target group or the gender of the pronoun, needs to be used. However, inferring people's social attributes from their data raises important ethical concerns in terms of privacy violations, lack of meaningful consent, and intersectional invisibility (Mohammad, 2022). Since determining whether the text is about a certain identity group is easier than whether it is produced by a certain identity group, there are more works investigating the former than the latter. An exception to this is the studies on disparate performance of models on certain dialects such as African American English (AAE) (Sap et al., 2019; Blodgett and O'Connor, 2017) . This is possible due to the existence of a dialect identification tool for AAE, which was trained by pairing geo-located tweets with US census data on race (Blodgett et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 926, |
|
"end": 944, |
|
"text": "(Sap et al., 2019;", |
|
"ref_id": "BIBREF75" |
|
}, |
|
{ |
|
"start": 945, |
|
"end": 973, |
|
"text": "Blodgett and O'Connor, 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1132, |
|
"end": 1155, |
|
"text": "(Blodgett et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness and Bias in NLP Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "One source of bias that the NLP community has devoted significant research effort to is word embeddings and pre-trained language models (Bolukbasi et al., 2016; Zhao et al., 2019) , which Shah et al. (2020) characterizes as semantic bias. Although it is not framed as such, this can be seen as a particular global explanation for biases that the models demonstrate in downstream tasks. However, the effectiveness of these methods has recently been questioned by Goldfarb-Tarrant et al. (2021) who found that there is no correlation between intrinsic bias metrics obtained by embedding association tests, and extrinsic bias metrics on downstream tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 160, |
|
"text": "(Bolukbasi et al., 2016;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 179, |
|
"text": "Zhao et al., 2019)", |
|
"ref_id": "BIBREF97" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness and Bias in NLP Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To determine the uses of explainability methods in fair NLP, we search the ACL Anthology for papers that cite the explainability methods listed in Section 2, and that include keywords, \"fair\", \"fairness\", or \"bias\". We further exclude the papers that focus on other types of biases such as inductive bias, or bias terms in the description of the architecture. Our results show that although there are a number of papers that mention unintended or societal biases as wider motivations to contextualize the work (e.g., by Zylberajch et al. (2021) ), only a handful of them apply explainability methods to uncover or investigate biases. All of the works we identify in this category use feature attribution methods, and except that of Aksenov et al. (2021) , employ them for demonstration purposes on a few examples. Although our methodology excludes works that are published in venues other than ACL conferences and workshops, we believe that it gives a good indication of the status of XAI in fairness and bias research in NLP. Mosca et al. (2021) use SHAP to demonstrate that adding user features to a hate speech detection model reduces biases that are due to spurious correlations in text, but introduces other biases based on user information. Wich et al. (2020) also apply SHAP to two example inputs in order to illustrate the political bias of a hate speech model. Aksenov et al. (2021) aggregate attention scores from BERT into global explanations in order to identify which words are most indicative of political bias.", |
|
"cite_spans": [ |
|
{ |
|
"start": 520, |
|
"end": 544, |
|
"text": "Zylberajch et al. (2021)", |
|
"ref_id": "BIBREF98" |
|
}, |
|
{ |
|
"start": 732, |
|
"end": 753, |
|
"text": "Aksenov et al. (2021)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1027, |
|
"end": 1046, |
|
"text": "Mosca et al. (2021)", |
|
"ref_id": "BIBREF60" |
|
}, |
|
{ |
|
"start": 1247, |
|
"end": 1265, |
|
"text": "Wich et al. (2020)", |
|
"ref_id": "BIBREF87" |
|
}, |
|
{ |
|
"start": 1370, |
|
"end": 1391, |
|
"text": "Aksenov et al. (2021)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Applications of XAI in Fair NLP", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Some works beyond the papers that our search methodology uncovered on the intersection of fairness for NLP and XAI are that of Kennedy et al. (2020) , which uses Sampling and Occlusion algorithm of Jin et al. (2019) to detect bias toward identity terms in hate speech classifiers, and that of Mathew et al. (2021) , which shows that using human rationales as an additional signal in training hate speech detection models reduces the bias of the model towards target communities. Prabhakaran et al. (2019) target individual fairness, and develop a framework to evaluate model bias against particular named entities with a perturbation based analysis. Although they do not frame their model as such, the automatically generated perturbations can be categorized as counterfactuals. Balk\u0131r et al. (2022) suggest the use of two metrics-necessity and sufficiency-as feature attribution scores, and apply their method to uncover different kinds of bias against protected group tokens in hate speech and abusive language detection models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 148, |
|
"text": "Kennedy et al. (2020)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 215, |
|
"text": "Jin et al. (2019)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 293, |
|
"end": 313, |
|
"text": "Mathew et al. (2021)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 779, |
|
"end": 799, |
|
"text": "Balk\u0131r et al. (2022)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Applications of XAI in Fair NLP", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "As summarized in Table 2 , almost all these works focus exclusively on hate speech detection, and use local feature attribution methods. The range of bias types is also quite limited. This demonstrates the very narrow context in which explainability has been linked to fairness in NLP.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 24, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Applications of XAI in Fair NLP", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "There are also some works beyond NLP that use XAI to improve fairness of ML models. Zhang and Bareinboim (2018) , Parafita and Vitria (2021) and Grabowicz et al. (2022) leverage methods from causal inference to both model the causes of the given prediction and provide explanations, and to ensure that protected attributes are not influencing the model decisions through unacceptable causal chains. The disadvantage of these models is that they require an explicit model of the causal relations between features, which is a difficult task for textual data (Feder et al., 2021a) . Pradhan et al. (2022) also suggest a causality inspired method that identifies subsets of data responsible for particular biases of the model. Begley et al. (2020) extend Shapely values to attribute the overall unfairness of an algorithm to individual input features. The main limitation of all these methods is that they are currently only applicable to low dimensional tabular data. How to extend these methods to explain the unfairness of NLP models remains an open research problem. As abstract frameworks for connecting XAI to fair ML, P et al. (2021) outline potential synergies between the two research areas. Alikhademi et al. (2021) enumerate different sources of bias, and discuss how XAI methods can help identify and mitigate these.", |
|
"cite_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 111, |
|
"text": "Zhang and Bareinboim (2018)", |
|
"ref_id": "BIBREF93" |
|
}, |
|
{ |
|
"start": 114, |
|
"end": 140, |
|
"text": "Parafita and Vitria (2021)", |
|
"ref_id": "BIBREF63" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 168, |
|
"text": "Grabowicz et al. (2022)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 577, |
|
"text": "(Feder et al., 2021a)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 601, |
|
"text": "Pradhan et al. (2022)", |
|
"ref_id": "BIBREF66" |
|
}, |
|
{ |
|
"start": 723, |
|
"end": 743, |
|
"text": "Begley et al. (2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1117, |
|
"end": 1136, |
|
"text": "ML, P et al. (2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1197, |
|
"end": 1221, |
|
"text": "Alikhademi et al. (2021)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Applications of XAI in Fair NLP", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The framework of causality (Pearl, 2009) is invoked both in fairness and explainability literature. The promise of causality is that it goes beyond correlations, and characterizes the causes behind observations. This is relevant to conceptualizing fairness since, as Loftus et al. (2018) argue, there are situations that are intuitively different from a fairness point of view, but that purely observational criteria cannot distinguish. Causality tries to capture the notion of causes of an outcome in terms of hypothetical interventions: if something is a true cause of a given outcome, then intervening on this variable will change the outcome. This notion of intervention is useful for both detecting biases and for choosing mitigation strategies. Causal interventions are also the fundamental notion behind counterfactual examples in XAI. It is easier for humans to identify the cause of a prediction if they are shown minimally different instances that result in opposite predictions. Hence, causal explanations can serve as proofs of bias or other undesirable correlations to developers and to end-users.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 40, |
|
"text": "(Pearl, 2009)", |
|
"ref_id": "BIBREF64" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XAI for Fair NLP through Causality and Robustness", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Going beyond correlations in data and capturing causal relations is also an effective way to increase robustness and generalization in machine learning models. As Kaushik et al. (2020) argue, causal correlations are invariant to differing data distributions, while non-causal correlations are much more context and dataset specific. Hence, models that can differentiate between the two and rely solely on casual correlations while ignoring the non-causal ones will perform well beyond the strict i.i.d. setting.", |
|
"cite_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 184, |
|
"text": "Kaushik et al. (2020)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XAI for Fair NLP through Causality and Robustness", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Non-causal, surface level correlations are often referred to as spurious correlations, and a common use case of XAI methods for developers is to facilitate the identification of such patterns. A common motivating argument in XAI methods for debugging NLP models Zylberajch et al., 2021) , as well as counterfactual data augmentation methods (Kaushik et al., 2020; Balashankar et al., 2021; Yang et al., 2021) , is that unintended biases are due to the model picking up such spurious associations, and XAI methods which can be used to improve the robustness of a model against these spurious patterns will also improve the fairness of a model as a side effect. There is indeed evidence that methods for robustness also reduce unintended bias in NLP models (Adragna et al., 2020; Pruksachatkun et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 262, |
|
"end": 286, |
|
"text": "Zylberajch et al., 2021)", |
|
"ref_id": "BIBREF98" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 363, |
|
"text": "(Kaushik et al., 2020;", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 364, |
|
"end": 389, |
|
"text": "Balashankar et al., 2021;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 390, |
|
"end": 408, |
|
"text": "Yang et al., 2021)", |
|
"ref_id": "BIBREF91" |
|
}, |
|
{ |
|
"start": 755, |
|
"end": 777, |
|
"text": "(Adragna et al., 2020;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 778, |
|
"end": 805, |
|
"text": "Pruksachatkun et al., 2021)", |
|
"ref_id": "BIBREF67" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XAI for Fair NLP through Causality and Robustness", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "However, these methods are limited in that they can address unintended biases only insofar as the biases are present and identifiable as token-level spurious correlations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XAI for Fair NLP through Causality and Robustness", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "As we saw in Sec. 4 and 5, only a few studies to date have attempted to apply explainability techniques in order to uncover biases in NLP systems, to a limited extent. In this section, we discuss some possible reasons for a seeming lack of progress in this area and outline promising directions for future research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Future Directions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Local explainability methods rely on the user to identify examples that might reveal bias. One issue in preventing wider adoption of XAI methods in fair NLP stems from the local nature of most explanation methods applicable to NLP models. An important step in identifying fairness problems within a model is identifying the data points where these issues might manifest. Since local explainability methods give explanations on particular data points, it is left to the user how to pick the instances to examine. This necessitates the user to first decide what biases to search for before employing XAI methods, limiting their usefulness for identifying unknown biases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Future Directions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Even if an issue can be identified with a local XAI method, it is difficult to know to what extent the insight can be generalized. This is an issue because it is often essential to know what subsets of the input are affected by the identified biased behaviour in order to apply effective mitigation strategies. Some methods such as Anchors mitigate this problem by specifying the set of examples an explanation applies to. Other approaches use abstractions such as high-level concepts (Feder et al., 2021b; to provide more generalizable insights. Principled methods to aggregate local explanations into more global and actionable insights are needed to make local explainability methods better suited to identifying and mitigating unintended biases in NLP models. Also, future NLP research could explore global explainability methods that have been used to uncover unknown biases (Tan et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 485, |
|
"end": 506, |
|
"text": "(Feder et al., 2021b;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 880, |
|
"end": 898, |
|
"text": "(Tan et al., 2018)", |
|
"ref_id": "BIBREF83" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local explanations are not easily generalizable.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Not all undesirable biases are surface-level or non-causal. In the motivation for XAI methods, there is strong emphasis on identifying token-level correlations caused by sampling bias or label bias. Although methods that target these patterns are shown to also improve the fairness of models, not all sources of bias fit well into this characterization (Hooker, 2021) , and hence might be difficult to detect with XAI methods that provide token-level explanations. For example, Bagdasaryan et al. (2019) show that the cost of differential privacy methods in decreasing the accuracy of deep learning NLP models, is much higher for underrepresented subgroups. A rigorous study of a model's structure and training process is required to discover such bias sources.", |
|
"cite_spans": [ |
|
{ |
|
"start": 353, |
|
"end": 367, |
|
"text": "(Hooker, 2021)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 503, |
|
"text": "Bagdasaryan et al. (2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local explanations are not easily generalizable.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Another issue that is common in works that approach fairness through robustness is the characterization of unintended biases as non-causal associations in data (Kaushik et al., 2020; Adragna et al., 2020) . In fact, it can be argued that many of the undesirable correlations observed in data are causal in nature, and will likely hold in a wide variety of different data distributions. For example, correlations between different genders and occupations-which arguably is the source of the occupational gender stereotypes picked up by NLP models (Rudinger et al., 2018) -are not due to unrepresentative samples or random correlations in the data, but rather underlying systemic biases in the distribution of occupations in the real world. To ensure a fair system, researchers must make a normative decision (Blodgett et al., 2020) that they do not want to reproduce this particular correlation in their model. This suggests that there may be inherent limitations to the ability of XAI methods to improve fairness of NLP methods through improving model robustness and generalization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 182, |
|
"text": "(Kaushik et al., 2020;", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 204, |
|
"text": "Adragna et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 546, |
|
"end": 569, |
|
"text": "(Rudinger et al., 2018)", |
|
"ref_id": "BIBREF74" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local explanations are not easily generalizable.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Some biases can be difficult for humans to recognize. Even for biases that could be characterized in terms of surface-level correlations, XAI methods rely on humans to recognize what an undesirable correlation is, but biased models are often biased in subtle ways. For example, if the dialect bias in a hate speech detection system is mostly mediated by false positives on the uses of reclaimed slurs, this might seem like a good justification to a user who is unfamiliar with this phenomenon (Sap et al., 2019) . More studies with human subjects are needed to investigate whether humans can recognise unintended biases that cause fairness issues through explainability methods as well as they can recognise simpler data biases.", |
|
"cite_spans": [ |
|
{ |
|
"start": 493, |
|
"end": 511, |
|
"text": "(Sap et al., 2019)", |
|
"ref_id": "BIBREF75" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local explanations are not easily generalizable.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Explainability methods are susceptible to fairwashing. An issue that has repeatedly been raised with respect to XAI methods is the potential for \"fairwashing\" biased models. This refers to techniques that adversarially manipulate explanations in order to obscure the model's reliance on protected attributes. Fairwashing has been shown possible in rule lists (A\u00efvodji et al., 2019) , and both gradient based and perturbation based feature attribution methods (Dimanov et al., 2020; Anders et al., 2020) . This relates to the wider issue of the faithfulness of an explainability method: if there is no guarantee that the explanations reflect the actual inner workings of the model, the explanations are of little use. One solution to this problem would be to extend certifiable robustness (Cohen et al., 2019; Ma et al., 2021) beyond the model itself, and develop certifiably faithful explainability methods with proofs that a particular way of testing for bias cannot be adversarially manipulated. Another approach to mitigate this issue is to provide the levels of uncertainty in the explanations, giving the end-user more information on whether to trust the generated explanation (Zhang et al., 2019) , or other ways to calibrate user trust to the quality of the provided explanations (Zhang et al., 2020b) . However, the effectiveness of these methods depends substantially on whether the model's predicted probabilities are well-calibrated to the true outcome probabilities. Certain machine learning models do not meet this criterion. Specifically, the commonly used deep learning models have been shown to be over-confident in their predictions (Guo et al., 2017) . Calibration of uncertainties is a necessary prerequisite, should they be used to calibrate user trust, as over-confident predictions can be themselves a source of mistrust.", |
|
"cite_spans": [ |
|
{ |
|
"start": 359, |
|
"end": 381, |
|
"text": "(A\u00efvodji et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 481, |
|
"text": "(Dimanov et al., 2020;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 502, |
|
"text": "Anders et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 788, |
|
"end": 808, |
|
"text": "(Cohen et al., 2019;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 809, |
|
"end": 825, |
|
"text": "Ma et al., 2021)", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 1182, |
|
"end": 1202, |
|
"text": "(Zhang et al., 2019)", |
|
"ref_id": "BIBREF95" |
|
}, |
|
{ |
|
"start": 1287, |
|
"end": 1308, |
|
"text": "(Zhang et al., 2020b)", |
|
"ref_id": "BIBREF96" |
|
}, |
|
{ |
|
"start": 1650, |
|
"end": 1668, |
|
"text": "(Guo et al., 2017)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local explanations are not easily generalizable.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Fair AI is focused on outcome fairness, but XAI is motivated by procedural fairness. Finally, it appears that there is a larger conceptual gap between the notions of fairness that the ethical AI community has developed, and the notion of fairness implicitly assumed in motivations for XAI methods. Namely, almost all the fairness metrics developed in Fair ML literature aim to formalize outcome fairness in that they are process-agnostic, and quantify the fairness of a model on its observed outcomes only. The type of fairness that motivates XAI, on the other hand, is closer to the concept of procedural fairness: XAI aims to elucidate the internal reasoning of a model, and make it trans-parent whether there are any parts of the decision process that could be deemed unfair.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local explanations are not easily generalizable.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We observe that due to the lack of better definitions of procedural fairness, the most common way XAI methods are applied to fairness issues is to check whether the model uses features that are explicitly associated with protected attributes (e.g., gendered pronouns). This practice promotes a similar ideal with \"fairness through unawareness\" in that it aims to place the veil of ignorance about the protected attributes not at the level of the data fed into the model, but into the model itself. In other words, the best one could do with these techniques seem to be to develop \"colourblind\" models which, even if they receive explicit information about protected attributes in their input, ignore this information when making their decisions. Although it is simple and intuitive, we suspect that such an approach has similar issues with the much criticized \"fairness through unawareness\" approach (Kusner et al., 2017; Morse et al., 2021) . More clearly specified notions of procedural fairness, as well as precise quantitative metrics similar to those that have been developed for outcome fairness, are needed in order to guide the development of XAI methods that can make ML models fairer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 900, |
|
"end": 921, |
|
"text": "(Kusner et al., 2017;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 922, |
|
"end": 941, |
|
"text": "Morse et al., 2021)", |
|
"ref_id": "BIBREF59" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Local explanations are not easily generalizable.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Publications in explainable NLP often cite fairness as a motivation for the work, but the exact relationship between the two concepts is typically left unspecified. Most current XAI methods provide explanations on a local level through post-hoc processing, leaving open questions about how to automatically identify fairness issues in individual explanations, and how to generalize from local explanations to infer systematic model bias. Although the two fields of explainability and fairness feel intuitively linked, a review of the literature revealed a surprisingly small amount of work at the intersection. We have discussed some of the conceptual underpinnings shared by both these fields as well as practical challenges to uniting them, and proposed areas for future research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Outcome fairness is also referred to as distributive fairness in this literature.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Peeking inside the black-box: A survey on explainable artificial intelligence (XAI)", |
|
"authors": [ |
|
{ |
|
"first": "Amina", |
|
"middle": [], |
|
"last": "Adadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammed", |
|
"middle": [], |
|
"last": "Berrada", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Access", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "52138--52160", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amina Adadi and Mohammed Berrada. 2018. Peek- ing inside the black-box: A survey on explainable artificial intelligence (XAI). IEEE Access, 6:52138- 52160.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Fairness and robustness in invariant learning: A case study in toxicity classification", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Adragna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elliot", |
|
"middle": [], |
|
"last": "Creager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Madras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the NeurIPS 2020 Workshop on Algorithmic Fairness through the Lens of Causality and Interpretability", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Adragna, Elliot Creager, David Madras, and Richard Zemel. 2020. Fairness and robustness in invariant learning: A case study in toxicity classifica- tion. In Proceedings of the NeurIPS 2020 Workshop on Algorithmic Fairness through the Lens of Causal- ity and Interpretability.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Fairwashing: the risk of rationalization", |
|
"authors": [ |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "A\u00efvodji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiromi", |
|
"middle": [], |
|
"last": "Arai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Fortineau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S\u00e9bastien", |
|
"middle": [], |
|
"last": "Gambs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satoshi", |
|
"middle": [], |
|
"last": "Hara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alain", |
|
"middle": [], |
|
"last": "Tapp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--170", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ulrich A\u00efvodji, Hiromi Arai, Olivier Fortineau, S\u00e9bastien Gambs, Satoshi Hara, and Alain Tapp. 2019. Fairwashing: the risk of rationalization. In Proceedings of the International Conference on Ma- chine Learning, pages 161-170.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Fine-grained classification of political bias in German news: A data set and initial experiments", |
|
"authors": [ |
|
{ |
|
"first": "Dmitrii", |
|
"middle": [], |
|
"last": "Aksenov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Bourgonje", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karolina", |
|
"middle": [], |
|
"last": "Zaczynska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Malte", |
|
"middle": [], |
|
"last": "Ostendorff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [ |
|
"Moreno" |
|
], |
|
"last": "Schneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Rehm", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 5th Workshop on Online Abuse and Harms (WOAH 2021)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "121--131", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dmitrii Aksenov, Peter Bourgonje, Karolina Zaczyn- ska, Malte Ostendorff, Julian Moreno Schneider, and Georg Rehm. 2021. Fine-grained classification of political bias in German news: A data set and initial experiments. In Proceedings of the 5th Workshop on Online Abuse and Harms (WOAH 2021), pages 121-131.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Can explainable AI explain unfairness? A framework for evaluating explainable AI", |
|
"authors": [ |
|
{ |
|
"first": "Kiana", |
|
"middle": [], |
|
"last": "Alikhademi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brianna", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Drobina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Gilbert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2106.07483" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kiana Alikhademi, Brianna Richardson, Emma Drobina, and Juan E Gilbert. 2021. Can explainable AI explain unfairness? A framework for evaluating explainable AI. arXiv preprint arXiv:2106.07483.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Fairwashing explanations with off-manifold detergent", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Anders", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Plamen", |
|
"middle": [], |
|
"last": "Pasliev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ann-Kathrin", |
|
"middle": [], |
|
"last": "Dombrowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pan", |
|
"middle": [], |
|
"last": "Kessel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "314--323", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Anders, Plamen Pasliev, Ann-Kathrin Dom- browski, Klaus-Robert M\u00fcller, and Pan Kessel. 2020. Fairwashing explanations with off-manifold deter- gent. In Proceedings of the International Conference on Machine Learning, pages 314-323.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Differential privacy has disparate impact on model accuracy", |
|
"authors": [ |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Bagdasaryan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omid", |
|
"middle": [], |
|
"last": "Poursaeed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vitaly", |
|
"middle": [], |
|
"last": "Shmatikov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eugene Bagdasaryan, Omid Poursaeed, and Vitaly Shmatikov. 2019. Differential privacy has disparate impact on model accuracy. Advances in Neural In- formation Processing Systems, 32.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyung", |
|
"middle": [ |
|
"Hyun" |
|
], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyung Hyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of the 3rd International Conference on Learning Represen- tations.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Can we improve model robustness through secondary attribute counterfactuals?", |
|
"authors": [ |
|
{ |
|
"first": "Ananth", |
|
"middle": [], |
|
"last": "Balashankar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuezhi", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Packer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nithum", |
|
"middle": [], |
|
"last": "Thain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ed", |
|
"middle": [], |
|
"last": "Chi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Beutel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4701--4712", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ananth Balashankar, Xuezhi Wang, Ben Packer, Nithum Thain, Ed Chi, and Alex Beutel. 2021. Can we im- prove model robustness through secondary attribute counterfactuals? In Proceedings of the 2021 Con- ference on Empirical Methods in Natural Language Processing, pages 4701-4712.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Necessity and sufficiency for explaining text classifiers: A case study in hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Esma", |
|
"middle": [], |
|
"last": "Balk\u0131r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isar", |
|
"middle": [], |
|
"last": "Nejadgholi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen C", |
|
"middle": [], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Esma Balk\u0131r, Isar Nejadgholi, Kathleen C Fraser, and Svetlana Kiritchenko. 2022. Necessity and suffi- ciency for explaining text classifiers: A case study in hate speech detection. In Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL), Seattle, WA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Big data's disparate impact", |
|
"authors": [ |
|
{ |
|
"first": "Solon", |
|
"middle": [], |
|
"last": "Barocas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew D", |
|
"middle": [], |
|
"last": "Selbst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "CALIFORNIA LAW REVIEW", |
|
"volume": "104", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Solon Barocas and Andrew D Selbst. 2016. Big data's disparate impact. CALIFORNIA LAW REVIEW, 104:671.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Explainability for fair machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Begley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tobias", |
|
"middle": [], |
|
"last": "Schwedes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Frye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Feige", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.07389" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Begley, Tobias Schwedes, Christopher Frye, and Ilya Feige. 2020. Explainability for fair machine learning. arXiv preprint arXiv:2010.07389.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "What constitutes fairness in work settings? a four-component model of procedural justice", |
|
"authors": [ |
|
{ |
|
"first": "Steven L", |
|
"middle": [], |
|
"last": "Blader", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom R", |
|
"middle": [], |
|
"last": "Tyler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Human Resource Management Review", |
|
"volume": "13", |
|
"issue": "1", |
|
"pages": "107--126", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven L Blader and Tom R Tyler. 2003. What con- stitutes fairness in work settings? a four-component model of procedural justice. Human Resource Man- agement Review, 13(1):107-126.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Language (technology) is power: A critical survey of \"bias\" in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Su Lin", |
|
"middle": [], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Solon", |
|
"middle": [], |
|
"last": "Barocas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hal", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "III" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5454--5476", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett, Solon Barocas, Hal Daum\u00e9 III, and Hanna Wallach. 2020. Language (technology) is power: A critical survey of \"bias\" in NLP. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5454- 5476.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Demographic dialectal variation in social media: A case study of African-American English", |
|
"authors": [ |
|
{ |
|
"first": "Su Lin", |
|
"middle": [], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lisa", |
|
"middle": [], |
|
"last": "Green", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "O'Connor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1119--1130", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett, Lisa Green, and Brendan O'Connor. 2016. Demographic dialectal variation in social media: A case study of African-American English. In Proceedings of the 2016 Conference on Empiri- cal Methods in Natural Language Processing, pages 1119-1130.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Racial disparity in natural language processing: A case study of social media African-American English", |
|
"authors": [ |
|
{ |
|
"first": "Su Lin", |
|
"middle": [], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "O'Connor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Workshop on Fairness, Accountability, and Transparency in Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett and Brendan O'Connor. 2017. Racial disparity in natural language processing: A case study of social media African-American English. In Proceedings of the 2017 Workshop on Fairness, Ac- countability, and Transparency in Machine Learning.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Man is to computer programmer as woman is to homemaker? debiasing word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tolga", |
|
"middle": [], |
|
"last": "Bolukbasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James Y", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Venkatesh", |
|
"middle": [], |
|
"last": "Saligrama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Kalai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tolga Bolukbasi, Kai-Wei Chang, James Y Zou, Venkatesh Saligrama, and Adam T Kalai. 2016. Man is to computer programmer as woman is to home- maker? debiasing word embeddings. Advances in Neural Information Processing Systems, 29.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Ulfar Erlingsson, Alina Oprea, and Colin Raffel. 2021. Extracting training data from large language models", |
|
"authors": [ |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Carlini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Tramer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Jagielski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ariel", |
|
"middle": [], |
|
"last": "Herbert-Voss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dawn", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulfar", |
|
"middle": [], |
|
"last": "Erlingsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alina", |
|
"middle": [], |
|
"last": "Oprea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 30th USENIX Security Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2633--2650", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicholas Carlini, Florian Tramer, Eric Wallace, Matthew Jagielski, Ariel Herbert-Voss, Katherine Lee, Adam Roberts, Tom Brown, Dawn Song, Ul- far Erlingsson, Alina Oprea, and Colin Raffel. 2021. Extracting training data from large language mod- els. In Proceedings of the 30th USENIX Security Symposium, pages 2633-2650.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Ilaria Giuseppina Penco, and Andrea Claudio Cosentini. 2022. A clarification of the nuances in the fairness metrics landscape", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Castelnovo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Riccardo", |
|
"middle": [], |
|
"last": "Crupi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greta", |
|
"middle": [], |
|
"last": "Greco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniele", |
|
"middle": [], |
|
"last": "Regoli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilaria Giuseppina", |
|
"middle": [], |
|
"last": "Penco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea Claudio", |
|
"middle": [], |
|
"last": "Cosentini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Scientific Reports", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Castelnovo, Riccardo Crupi, Greta Greco, Daniele Regoli, Ilaria Giuseppina Penco, and An- drea Claudio Cosentini. 2022. A clarification of the nuances in the fairness metrics landscape. Scientific Reports, 12.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Fairness in machine learning: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Caton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Haas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.04053" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Caton and Christian Haas. 2020. Fairness in machine learning: A survey. arXiv preprint arXiv:2010.04053.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Bias and fairness in natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vinod", |
|
"middle": [], |
|
"last": "Prabhakaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai-Wei Chang, Vinod Prabhakaran, and Vicente Or- donez. 2019. Bias and fairness in natural language processing. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Process- ing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP): Tutorial Abstracts.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Retain: An interpretable predictive model for healthcare using reverse time attention mechanism", |
|
"authors": [ |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [ |
|
"Taha" |
|
], |
|
"last": "Bahadori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimeng", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Kulas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Schuetz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Stewart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edward Choi, Mohammad Taha Bahadori, Jimeng Sun, Joshua Kulas, Andy Schuetz, and Walter Stewart. 2016. Retain: An interpretable predictive model for healthcare using reverse time attention mechanism. Advances in Neural Information Processing Systems, 29.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "What does BERT look at? An analysis of BERT's attention", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Urvashi", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "276--286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D Manning. 2019. What does BERT look at? An analysis of BERT's attention. In Pro- ceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Certified adversarial robustness via randomized smoothing", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elan", |
|
"middle": [], |
|
"last": "Rosenfeld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zico", |
|
"middle": [], |
|
"last": "Kolter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1310--1320", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. 2019. Certified adversarial robustness via randomized smoothing. In Proceedings of the International Con- ference on Machine Learning, pages 1310-1320.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Quantifying social biases in NLP: A generalization and empirical comparison of extrinsic fairness metrics", |
|
"authors": [ |
|
{ |
|
"first": "Paula", |
|
"middle": [], |
|
"last": "Czarnowska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yogarshi", |
|
"middle": [], |
|
"last": "Vyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kashif", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "1249--1267", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paula Czarnowska, Yogarshi Vyas, and Kashif Shah. 2021. Quantifying social biases in NLP: A general- ization and empirical comparison of extrinsic fairness metrics. Transactions of the Association for Compu- tational Linguistics, 9:1249-1267.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A survey of the state of explainable AI for natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Marina", |
|
"middle": [], |
|
"last": "Danilevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ranit", |
|
"middle": [], |
|
"last": "Aharonov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannis", |
|
"middle": [], |
|
"last": "Katsis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ban", |
|
"middle": [], |
|
"last": "Kawas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prithviraj", |
|
"middle": [], |
|
"last": "Sen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "447--459", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marina Danilevsky, Kun Qian, Ranit Aharonov, Yan- nis Katsis, Ban Kawas, and Prithviraj Sen. 2020. A survey of the state of explainable AI for natural lan- guage processing. In Proceedings of the 1st Confer- ence of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th Interna- tional Joint Conference on Natural Language Pro- cessing, pages 447-459, Suzhou, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Opportunities and challenges in explainable artificial intelligence (xai): A survey", |
|
"authors": [ |
|
{ |
|
"first": "Arun", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Rad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2006.11371" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arun Das and Paul Rad. 2020. Opportunities and chal- lenges in explainable artificial intelligence (xai): A survey. arXiv preprint arXiv:2006.11371.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Eraser: A benchmark to evaluate rationalized NLP models", |
|
"authors": [ |
|
{ |
|
"first": "Jay", |
|
"middle": [], |
|
"last": "Deyoung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazneen", |
|
"middle": [], |
|
"last": "Fatema Rajani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Lehman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron C", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4443--4458", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jay DeYoung, Sarthak Jain, Nazneen Fatema Rajani, Eric Lehman, Caiming Xiong, Richard Socher, and Byron C Wallace. 2020. Eraser: A benchmark to evaluate rationalized NLP models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4443-4458.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "You shouldn't trust me: Learning models which conceal unfairness from multiple explanation methods", |
|
"authors": [ |
|
{ |
|
"first": "Botty", |
|
"middle": [], |
|
"last": "Dimanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Umang", |
|
"middle": [], |
|
"last": "Bhatt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mateja", |
|
"middle": [], |
|
"last": "Jamnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Weller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the AAAI Workshop on Artificial Intelligence Safety (SafeAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Botty Dimanov, Umang Bhatt, Mateja Jamnik, and Adrian Weller. 2020. You shouldn't trust me: Learn- ing models which conceal unfairness from multiple explanation methods. In Proceedings of the AAAI Workshop on Artificial Intelligence Safety (SafeAI).", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Towards a rigorous science of interpretable machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Finale", |
|
"middle": [], |
|
"last": "Doshi-Velez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1702.08608" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Finale Doshi-Velez and Been Kim. 2017. Towards a rigorous science of interpretable machine learning. arXiv preprint arXiv:1702.08608.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Causal inference in natural language processing: Estimation, prediction, interpretation and beyond", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Feder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Keith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emaad", |
|
"middle": [], |
|
"last": "Manzoor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reid", |
|
"middle": [], |
|
"last": "Pryzant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhanya", |
|
"middle": [], |
|
"last": "Sridhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zach", |
|
"middle": [], |
|
"last": "Wood-Doughty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Grimmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roi", |
|
"middle": [], |
|
"last": "Reichart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Roberts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2109.00725" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Feder, Katherine A Keith, Emaad Manzoor, Reid Pryzant, Dhanya Sridhar, Zach Wood-Doughty, Jacob Eisenstein, Justin Grimmer, Roi Reichart, Margaret E Roberts, et al. 2021a. Causal inference in natural lan- guage processing: Estimation, prediction, interpreta- tion and beyond. arXiv preprint arXiv:2109.00725.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Causalm: Causal model explanation through counterfactual language models", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Feder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadav", |
|
"middle": [], |
|
"last": "Oved", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Uri", |
|
"middle": [], |
|
"last": "Shalit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roi", |
|
"middle": [], |
|
"last": "Reichart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Computational Linguistics", |
|
"volume": "47", |
|
"issue": "2", |
|
"pages": "333--386", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Feder, Nadav Oved, Uri Shalit, and Roi Reichart. 2021b. Causalm: Causal model explanation through counterfactual language models. Computational Lin- guistics, 47(2):333-386.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Attention in natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Galassi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Lippi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Torroni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Transactions on Neural Networks and Learning Systems", |
|
"volume": "32", |
|
"issue": "10", |
|
"pages": "4291--4308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrea Galassi, Marco Lippi, and Paolo Torroni. 2020. Attention in natural language processing. IEEE Transactions on Neural Networks and Learning Sys- tems, 32(10):4291-4308.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Evaluating models' local decision boundaries via contrast sets", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victoria", |
|
"middle": [], |
|
"last": "Basmov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Bogin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sihao", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pradeep", |
|
"middle": [], |
|
"last": "Dasigi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dheeru", |
|
"middle": [], |
|
"last": "Dua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanai", |
|
"middle": [], |
|
"last": "Elazar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ananth", |
|
"middle": [], |
|
"last": "Gottumukkala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1307--1323", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Gardner, Yoav Artzi, Victoria Basmov, Jonathan Berant, Ben Bogin, Sihao Chen, Pradeep Dasigi, Dheeru Dua, Yanai Elazar, Ananth Gottumukkala, et al. 2020. Evaluating models' local decision bound- aries via contrast sets. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1307-1323.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Counterfactual fairness in text classification through robustness", |
|
"authors": [ |
|
{ |
|
"first": "Sahaj", |
|
"middle": [], |
|
"last": "Garg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Perot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicole", |
|
"middle": [], |
|
"last": "Limtiaco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Taly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ed H", |
|
"middle": [], |
|
"last": "Chi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Beutel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "219--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sahaj Garg, Vincent Perot, Nicole Limtiaco, Ankur Taly, Ed H Chi, and Alex Beutel. 2019. Counterfactual fairness in text classification through robustness. In Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and Society, pages 219-226.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Intrinsic bias metrics do not correlate with application bias", |
|
"authors": [ |
|
{ |
|
"first": "Seraphina", |
|
"middle": [], |
|
"last": "Goldfarb-Tarrant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Marchant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [ |
|
"Mu\u00f1oz" |
|
], |
|
"last": "S\u00e1nchez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mugdha", |
|
"middle": [], |
|
"last": "Pandya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1926--1940", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seraphina Goldfarb-Tarrant, Rebecca Marchant, Ri- cardo Mu\u00f1oz S\u00e1nchez, Mugdha Pandya, and Adam Lopez. 2021. Intrinsic bias metrics do not correlate with application bias. In Proceedings of the 59th An- nual Meeting of the Association for Computational Linguistics and the 11th International Joint Confer- ence on Natural Language Processing (Volume 1: Long Papers), pages 1926-1940.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Marrying fairness and explainability in supervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Przemyslaw", |
|
"middle": [], |
|
"last": "Grabowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Perello", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aarshee", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2204.02947" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Przemyslaw Grabowicz, Nicholas Perello, and Aarshee Mishra. 2022. Marrying fairness and explain- ability in supervised learning. arXiv preprint arXiv:2204.02947.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Beyond distributive fairness in algorithmic decision making: Feature selection for procedurally fair learning", |
|
"authors": [ |
|
{ |
|
"first": "Nina", |
|
"middle": [], |
|
"last": "Grgi\u0107-Hla\u010da", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhammad", |
|
"middle": [], |
|
"last": "Bilal Zafar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krishna", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Gummadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Weller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nina Grgi\u0107-Hla\u010da, Muhammad Bilal Zafar, Krishna P Gummadi, and Adrian Weller. 2018. Beyond distribu- tive fairness in algorithmic decision making: Feature selection for procedurally fair learning. In Proceed- ings of the AAAI Conference on Artificial Intelligence, volume 32.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "A survey of methods for explaining black box models", |
|
"authors": [ |
|
{ |
|
"first": "Riccardo", |
|
"middle": [], |
|
"last": "Guidotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Monreale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salvatore", |
|
"middle": [], |
|
"last": "Ruggieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franco", |
|
"middle": [], |
|
"last": "Turini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fosca", |
|
"middle": [], |
|
"last": "Giannotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dino", |
|
"middle": [], |
|
"last": "Pedreschi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACM Computing Surveys (CSUR)", |
|
"volume": "51", |
|
"issue": "5", |
|
"pages": "1--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Riccardo Guidotti, Anna Monreale, Salvatore Ruggieri, Franco Turini, Fosca Giannotti, and Dino Pedreschi. 2018. A survey of methods for explaining black box models. ACM Computing Surveys (CSUR), 51(5):1- 42.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "On calibration of modern neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Chuan", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoff", |
|
"middle": [], |
|
"last": "Pleiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian Q", |
|
"middle": [], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1321--1330", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q Wein- berger. 2017. On calibration of modern neural net- works. In Proceedings of the International Confer- ence on Machine Learning, pages 1321-1330.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Explaining black box predictions and unveiling data artifacts through influence functions", |
|
"authors": [ |
|
{ |
|
"first": "Xiaochuang", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron C", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5553--5563", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaochuang Han, Byron C Wallace, and Yulia Tsvetkov. 2020. Explaining black box predictions and unveil- ing data artifacts through influence functions. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5553- 5563.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Designing and interpreting probes with control tasks", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Hewitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2733--2743", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Hewitt and Percy Liang. 2019. Designing and in- terpreting probes with control tasks. In Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2733-2743.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Moving beyond \"algorithmic bias is a data problem", |
|
"authors": [ |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Hooker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Patterns", |
|
"volume": "2", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sara Hooker. 2021. Moving beyond \"algorithmic bias is a data problem\". Patterns, 2(4):100241.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Attention is not explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron C", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3543--3556", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarthak Jain and Byron C Wallace. 2019. Attention is not explanation. In Proceedings of the 2019 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 3543-3556.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Towards hierarchical importance attribution: Explaining compositional semantics for neural sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Xisen", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongyu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junyi", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangyang", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xisen Jin, Zhongyu Wei, Junyi Du, Xiangyang Xue, and Xiang Ren. 2019. Towards hierarchical importance attribution: Explaining compositional semantics for neural sequence models. In Proceedings of the Inter- national Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Explaining the efficacy of counterfactually augmented data", |
|
"authors": [ |
|
{ |
|
"first": "Divyansh", |
|
"middle": [], |
|
"last": "Kaushik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amrith", |
|
"middle": [], |
|
"last": "Setlur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard H", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [ |
|
"Chase" |
|
], |
|
"last": "Lipton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Divyansh Kaushik, Amrith Setlur, Eduard H Hovy, and Zachary Chase Lipton. 2020. Explaining the efficacy of counterfactually augmented data. In Proceedings of the International Conference on Learning Repre- sentations.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Contextualizing hate speech classifiers with post-hoc explanation", |
|
"authors": [ |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "Kennedy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xisen", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aida", |
|
"middle": [ |
|
"Mostafazadeh" |
|
], |
|
"last": "Davani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morteza", |
|
"middle": [], |
|
"last": "Dehghani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5435--5442", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brendan Kennedy, Xisen Jin, Aida Mostafazadeh Da- vani, Morteza Dehghani, and Xiang Ren. 2020. Con- textualizing hate speech classifiers with post-hoc ex- planation. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 5435-5442.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Interpretability beyond feature attribution: Quantitative testing with concept activation vectors (tcav)", |
|
"authors": [ |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Wattenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Gilmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carrie", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Wexler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernanda", |
|
"middle": [], |
|
"last": "Viegas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2668--2677", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Been Kim, Martin Wattenberg, Justin Gilmer, Carrie Cai, James Wexler, Fernanda Viegas, et al. 2018. In- terpretability beyond feature attribution: Quantitative testing with concept activation vectors (tcav). In Pro- ceedings of the International Conference on Machine Learning, pages 2668-2677.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Examining gender and race bias in two hundred sentiment analysis systems", |
|
"authors": [ |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Svetlana Kiritchenko and Saif Mohammad. 2018. Ex- amining gender and race bias in two hundred senti- ment analysis systems. In Proceedings of the Seventh Joint Conference on Lexical and Computational Se- mantics, pages 43-53.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Understanding black-box predictions via influence functions", |
|
"authors": [ |
|
{ |
|
"first": "Pang Wei", |
|
"middle": [], |
|
"last": "Koh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1885--1894", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pang Wei Koh and Percy Liang. 2017. Understanding black-box predictions via influence functions. In Pro- ceedings of the International Conference on Machine Learning, pages 1885-1894.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Counterfactual fairness. Advances in neural information processing systems", |
|
"authors": [ |
|
{ |
|
"first": "Matt J", |
|
"middle": [], |
|
"last": "Kusner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Loftus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Russell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Silva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt J Kusner, Joshua Loftus, Chris Russell, and Ri- cardo Silva. 2017. Counterfactual fairness. Advances in neural information processing systems, 30.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Explanation-based human debugging of NLP models: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Piyawat", |
|
"middle": [], |
|
"last": "Lertvittayakumjorn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francesca", |
|
"middle": [], |
|
"last": "Toni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "1508--1528", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piyawat Lertvittayakumjorn and Francesca Toni. 2021. Explanation-based human debugging of NLP mod- els: A survey. Transactions of the Association for Computational Linguistics, 9:1508-1528.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Causal reasoning for algorithmic fairness", |
|
"authors": [ |
|
{ |
|
"first": "Joshua R", |
|
"middle": [], |
|
"last": "Loftus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Russell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt J", |
|
"middle": [], |
|
"last": "Kusner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Silva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.05859" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joshua R Loftus, Chris Russell, Matt J Kusner, and Ri- cardo Silva. 2018. Causal reasoning for algorithmic fairness. arXiv preprint arXiv:1805.05859.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "A unified approach to interpreting model predictions", |
|
"authors": [ |
|
{ |
|
"first": "Scott M", |
|
"middle": [], |
|
"last": "Lundberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Su-In", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "4765--4774", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scott M Lundberg and Su-In Lee. 2017. A unified ap- proach to interpreting model predictions. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems 30, pages 4765-4774. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Metamorphic testing and certified mitigation of fairness violations in NLP models", |
|
"authors": [ |
|
{ |
|
"first": "Pingchuan", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuai", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Twenty-Ninth International Joint Conferences on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "458--465", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pingchuan Ma, Shuai Wang, and Jin Liu. 2021. Meta- morphic testing and certified mitigation of fairness violations in NLP models. In Proceedings of the Twenty-Ninth International Joint Conferences on Ar- tificial Intelligence, pages 458-465.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Hatexplain: A benchmark dataset for explainable hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Binny", |
|
"middle": [], |
|
"last": "Mathew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Punyajoy", |
|
"middle": [], |
|
"last": "Saha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seid Muhie", |
|
"middle": [], |
|
"last": "Yimam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Biemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pawan", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Animesh", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "35", |
|
"issue": "", |
|
"pages": "14867--14875", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Binny Mathew, Punyajoy Saha, Seid Muhie Yimam, Chris Biemann, Pawan Goyal, and Animesh Mukher- jee. 2021. Hatexplain: A benchmark dataset for ex- plainable hate speech detection. In Proceedings of the AAAI Conference on Artificial Intelligence, vol- ume 35, pages 14867-14875.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "A survey on bias and fairness in machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Ninareh", |
|
"middle": [], |
|
"last": "Mehrabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fred", |
|
"middle": [], |
|
"last": "Morstatter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nripsuta", |
|
"middle": [], |
|
"last": "Saxena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Lerman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aram", |
|
"middle": [], |
|
"last": "Galstyan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ACM Computing Surveys (CSUR)", |
|
"volume": "54", |
|
"issue": "6", |
|
"pages": "1--35", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ninareh Mehrabi, Fred Morstatter, Nripsuta Saxena, Kristina Lerman, and Aram Galstyan. 2021. A sur- vey on bias and fairness in machine learning. ACM Computing Surveys (CSUR), 54(6):1-35.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Ethics sheet for automatic emotion recognition and sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Saif M", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif M Mohammad. 2022. Ethics sheet for automatic emotion recognition and sentiment analysis. Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "From what to how: an initial review of publicly available AI ethics tools, methods and research to translate principles into practices", |
|
"authors": [ |
|
{ |
|
"first": "Jessica", |
|
"middle": [], |
|
"last": "Morley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luciano", |
|
"middle": [], |
|
"last": "Floridi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Libby", |
|
"middle": [], |
|
"last": "Kinsey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anat", |
|
"middle": [], |
|
"last": "Elhalal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Ethics, Governance, and Policies in Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "153--183", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jessica Morley, Luciano Floridi, Libby Kinsey, and Anat Elhalal. 2021. From what to how: an initial review of publicly available AI ethics tools, methods and re- search to translate principles into practices. In Ethics, Governance, and Policies in Artificial Intelligence, pages 153-183. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "Do the ends justify the means? variation in the distributive and procedural fairness of machine learning algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Lily", |
|
"middle": [], |
|
"last": "Morse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike Horia M", |
|
"middle": [], |
|
"last": "Teodorescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yazeed", |
|
"middle": [], |
|
"last": "Awwad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerald C", |
|
"middle": [], |
|
"last": "Kane", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Journal of Business Ethics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lily Morse, Mike Horia M Teodorescu, Yazeed Awwad, and Gerald C Kane. 2021. Do the ends justify the means? variation in the distributive and procedural fairness of machine learning algorithms. Journal of Business Ethics, pages 1-13.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Understanding and interpreting the impact of user context in hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Edoardo", |
|
"middle": [], |
|
"last": "Mosca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Wich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Groh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Ninth International Workshop on Natural Language Processing for Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "91--102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edoardo Mosca, Maximilian Wich, and Georg Groh. 2021. Understanding and interpreting the impact of user context in hate speech detection. In Proceedings of the Ninth International Workshop on Natural Lan- guage Processing for Social Media, pages 91-102.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "Improving generalizability in implicitly abusive language detection with concept activation vectors", |
|
"authors": [ |
|
{ |
|
"first": "Isar", |
|
"middle": [], |
|
"last": "Nejadgholi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Isar Nejadgholi, Kathleen C Fraser, and Svetlana Kir- itchenko. 2022. Improving generalizability in im- plicitly abusive language detection with concept ac- tivation vectors. In Proceedings of the 60th Annual Meeting of the Association for Computational Lin- guistics, Dublin, Ireland.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "On fairness and interpretability", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Deepak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Sanil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joemon", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Jose", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the IJCAI Workshop on AI for Social Good", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepak P, Sanil V, and Joemon M. Jose. 2021. On fairness and interpretability. In Proceedings of the IJCAI Workshop on AI for Social Good.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Deep causal graphs for causal inference, black-box explainability and fairness", |
|
"authors": [ |
|
{ |
|
"first": "Alvaro", |
|
"middle": [], |
|
"last": "Parafita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordi", |
|
"middle": [], |
|
"last": "Vitria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Artificial Intelligence Research and Development: Proceedings of the 23rd International Conference of the Catalan Association for Artificial Intelligence", |
|
"volume": "339", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alvaro Parafita and Jordi Vitria. 2021. Deep causal graphs for causal inference, black-box explainability and fairness. In Artificial Intelligence Research and Development: Proceedings of the 23rd International Conference of the Catalan Association for Artificial Intelligence, volume 339, page 415. IOS Press.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "Causality", |
|
"authors": [ |
|
{ |
|
"first": "Judea", |
|
"middle": [], |
|
"last": "Pearl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Judea Pearl. 2009. Causality. Cambridge University Press.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "Perturbation sensitivity analysis to detect unintended model biases", |
|
"authors": [ |
|
{ |
|
"first": "Vinodkumar", |
|
"middle": [], |
|
"last": "Prabhakaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hutchinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5740--5745", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vinodkumar Prabhakaran, Ben Hutchinson, and Mar- garet Mitchell. 2019. Perturbation sensitivity analy- sis to detect unintended model biases. In Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5740-5745.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "Interpretable data-based explanations for fairness debugging", |
|
"authors": [ |
|
{ |
|
"first": "Romila", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiongli", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Boris", |
|
"middle": [], |
|
"last": "Glavic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Babak", |
|
"middle": [], |
|
"last": "Salimi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the 2022 ACM SIGMOD International Conference on Management of Data", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Romila Pradhan, Jiongli Zhu, Boris Glavic, and Babak Salimi. 2022. Interpretable data-based explanations for fairness debugging. In Proceedings of the 2022 ACM SIGMOD International Conference on Manage- ment of Data.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "Does robustness improve fairness? approaching fairness with word substitution robustness methods for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Yada", |
|
"middle": [], |
|
"last": "Pruksachatkun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satyapriya", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jwala", |
|
"middle": [], |
|
"last": "Dhamala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3320--3331", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yada Pruksachatkun, Satyapriya Krishna, Jwala Dhamala, Rahul Gupta, and Kai-Wei Chang. 2021. Does robustness improve fairness? approaching fair- ness with word substitution robustness methods for text classification. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 3320-3331.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "Explaining the predictions of any classifier", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1135--1144", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. \"Why should i trust you?\" Explain- ing the predictions of any classifier. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 1135-1144.", |
|
"links": null |
|
}, |
|
"BIBREF69": { |
|
"ref_id": "b69", |
|
"title": "Anchors: High-precision modelagnostic explanations", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2018a. Anchors: High-precision model- agnostic explanations. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32.", |
|
"links": null |
|
}, |
|
"BIBREF70": { |
|
"ref_id": "b70", |
|
"title": "Semantically equivalent adversarial rules for debugging NLP models", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "856--865", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2018b. Semantically equivalent adversarial rules for debugging NLP models. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 856-865.", |
|
"links": null |
|
}, |
|
"BIBREF71": { |
|
"ref_id": "b71", |
|
"title": "Beyond accuracy: Behavioral testing of nlp models with checklist", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tongshuang", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4902--4912", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Tulio Ribeiro, Tongshuang Wu, Carlos Guestrin, and Sameer Singh. 2020. Beyond accuracy: Behav- ioral testing of nlp models with checklist. In Proceed- ings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4902-4912.", |
|
"links": null |
|
}, |
|
"BIBREF72": { |
|
"ref_id": "b72", |
|
"title": "Explaining NLP models via minimal contrastive editing (MiCE)", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Ross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ana", |
|
"middle": [], |
|
"last": "Marasovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Peters", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3840--3852", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Ross, Ana Marasovi\u0107, and Matthew E Peters. 2021. Explaining NLP models via minimal con- trastive editing (MiCE). In Findings of the Asso- ciation for Computational Linguistics: ACL-IJCNLP 2021, pages 3840-3852.", |
|
"links": null |
|
}, |
|
"BIBREF73": { |
|
"ref_id": "b73", |
|
"title": "Hatecheck: Functional tests for hate speech detection models", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "R\u00f6ttger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bertie", |
|
"middle": [], |
|
"last": "Vidgen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helen", |
|
"middle": [], |
|
"last": "Margetts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janet", |
|
"middle": [], |
|
"last": "Pierrehumbert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "41--58", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul R\u00f6ttger, Bertie Vidgen, Dong Nguyen, Zeerak Waseem, Helen Margetts, and Janet Pierrehumbert. 2021. Hatecheck: Functional tests for hate speech detection models. In Proceedings of the 59th Annual Meeting of the Association for Computational Lin- guistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 41-58.", |
|
"links": null |
|
}, |
|
"BIBREF74": { |
|
"ref_id": "b74", |
|
"title": "Gender bias in coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Rudinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Leonard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Van Durme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "8--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018. Gender bias in coreference resolution. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 8-14.", |
|
"links": null |
|
}, |
|
"BIBREF75": { |
|
"ref_id": "b75", |
|
"title": "The risk of racial bias in hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "Sap", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dallas", |
|
"middle": [], |
|
"last": "Card", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saadia", |
|
"middle": [], |
|
"last": "Gabriel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1668--1678", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maarten Sap, Dallas Card, Saadia Gabriel, Yejin Choi, and Noah A Smith. 2019. The risk of racial bias in hate speech detection. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1668-1678.", |
|
"links": null |
|
}, |
|
"BIBREF76": { |
|
"ref_id": "b76", |
|
"title": "Grad-cam: Visual explanations from deep networks via gradient-based localization", |
|
"authors": [ |
|
{ |
|
"first": "Ramprasaath R", |
|
"middle": [], |
|
"last": "Selvaraju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Cogswell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhishek", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramakrishna", |
|
"middle": [], |
|
"last": "Vedantam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE International Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "618--626", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. 2017. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE International Conference on Computer Vision, pages 618-626.", |
|
"links": null |
|
}, |
|
"BIBREF77": { |
|
"ref_id": "b77", |
|
"title": "Predictive biases in natural language processing models: A conceptual framework and overview", |
|
"authors": [ |
|
{ |
|
"first": "Deven Santosh", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H Andrew", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5248--5264", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deven Santosh Shah, H Andrew Schwartz, and Dirk Hovy. 2020. Predictive biases in natural language processing models: A conceptual framework and overview. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 5248-5264.", |
|
"links": null |
|
}, |
|
"BIBREF78": { |
|
"ref_id": "b78", |
|
"title": "Learning important features through propagating activation differences", |
|
"authors": [ |
|
{ |
|
"first": "Avanti", |
|
"middle": [], |
|
"last": "Shrikumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peyton", |
|
"middle": [], |
|
"last": "Greenside", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anshul", |
|
"middle": [], |
|
"last": "Kundaje", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3145--3153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avanti Shrikumar, Peyton Greenside, and Anshul Kun- daje. 2017. Learning important features through propagating activation differences. In Proceedings of the International Conference on Machine Learning, pages 3145-3153.", |
|
"links": null |
|
}, |
|
"BIBREF79": { |
|
"ref_id": "b79", |
|
"title": "Deep inside convolutional networks: Visualising image classification models and saliency maps", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Vedaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2nd International Conference on Learning Representations, Workshop Track", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen Simonyan, Andrea Vedaldi, and Andrew Zisser- man. 2014. Deep inside convolutional networks: Vi- sualising image classification models and saliency maps. In Proceedings of the 2nd International Conference on Learning Representations, Workshop Track.", |
|
"links": null |
|
}, |
|
"BIBREF80": { |
|
"ref_id": "b80", |
|
"title": "Smoothgrad: removing noise by adding noise", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Smilkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Thorat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Been", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernanda", |
|
"middle": [], |
|
"last": "Vi\u00e9gas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Wattenberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1706.03825" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Smilkov, Nikhil Thorat, Been Kim, Fernanda Vi\u00e9gas, and Martin Wattenberg. 2017. Smoothgrad: removing noise by adding noise. arXiv preprint arXiv:1706.03825.", |
|
"links": null |
|
}, |
|
"BIBREF81": { |
|
"ref_id": "b81", |
|
"title": "Evaluating gender bias in machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Stanovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1679--1684", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gabriel Stanovsky, Noah A Smith, and Luke Zettle- moyer. 2019. Evaluating gender bias in machine translation. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 1679-1684.", |
|
"links": null |
|
}, |
|
"BIBREF82": { |
|
"ref_id": "b82", |
|
"title": "Axiomatic attribution for deep networks", |
|
"authors": [ |
|
{ |
|
"first": "Mukund", |
|
"middle": [], |
|
"last": "Sundararajan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Taly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiqi", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3319--3328", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mukund Sundararajan, Ankur Taly, and Qiqi Yan. 2017. Axiomatic attribution for deep networks. In Pro- ceedings of the International Conference on Machine Learning, pages 3319-3328.", |
|
"links": null |
|
}, |
|
"BIBREF83": { |
|
"ref_id": "b83", |
|
"title": "Distill-and-compare: Auditing black-box models using transparent model distillation", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Caruana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giles", |
|
"middle": [], |
|
"last": "Hooker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yin", |
|
"middle": [], |
|
"last": "Lou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "303--310", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Tan, Rich Caruana, Giles Hooker, and Yin Lou. 2018. Distill-and-compare: Auditing black-box mod- els using transparent model distillation. In Proceed- ings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, pages 303-310.", |
|
"links": null |
|
}, |
|
"BIBREF84": { |
|
"ref_id": "b84", |
|
"title": "Information-theoretic probing with minimum description length", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Voita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "183--196", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Voita and Ivan Titov. 2020. Information-theoretic probing with minimum description length. In Pro- ceedings of the 2020 Conference on Empirical Meth- ods in Natural Language Processing (EMNLP), pages 183-196.", |
|
"links": null |
|
}, |
|
"BIBREF85": { |
|
"ref_id": "b85", |
|
"title": "Interpreting predictions of NLP models", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "20--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Wallace, Matt Gardner, and Sameer Singh. 2020. Interpreting predictions of NLP models. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts, pages 20-23.", |
|
"links": null |
|
}, |
|
"BIBREF86": { |
|
"ref_id": "b86", |
|
"title": "AllenNLP Interpret: A framework for explaining predictions of NLP models", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Tuyls", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junlin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjay", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP): System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Wallace, Jens Tuyls, Junlin Wang, Sanjay Sub- ramanian, Matt Gardner, and Sameer Singh. 2019. AllenNLP Interpret: A framework for explaining predictions of NLP models. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP): System Demonstrations, pages 7-12.", |
|
"links": null |
|
}, |
|
"BIBREF87": { |
|
"ref_id": "b87", |
|
"title": "Impact of politically biased data on hate speech classification", |
|
"authors": [ |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Wich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Groh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fourth Workshop on Online Abuse and Harms", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "54--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maximilian Wich, Jan Bauer, and Georg Groh. 2020. Impact of politically biased data on hate speech clas- sification. In Proceedings of the Fourth Workshop on Online Abuse and Harms, pages 54-64.", |
|
"links": null |
|
}, |
|
"BIBREF88": { |
|
"ref_id": "b88", |
|
"title": "Attention is not not explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Wiegreffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuval", |
|
"middle": [], |
|
"last": "Pinter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not explanation. In Proceedings of the 2019 Confer- ence on Empirical Methods in Natural Language Pro- cessing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 11-20.", |
|
"links": null |
|
}, |
|
"BIBREF89": { |
|
"ref_id": "b89", |
|
"title": "Polyjuice: Generating counterfactuals for explaining, evaluating, and improving models", |
|
"authors": [ |
|
{ |
|
"first": "Tongshuang", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [ |
|
"Tulio" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Heer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Weld", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "6707--6723", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-long.523" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tongshuang Wu, Marco Tulio Ribeiro, Jeffrey Heer, and Daniel Weld. 2021. Polyjuice: Generating counter- factuals for explaining, evaluating, and improving models. In Proceedings of the 59th Annual Meet- ing of the Association for Computational Linguistics and the 11th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 6707-6723, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF90": { |
|
"ref_id": "b90", |
|
"title": "Show, attend and tell: Neural image caption generation with visual attention", |
|
"authors": [ |
|
{ |
|
"first": "Kelvin", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhudinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rich", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2048--2057", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kelvin Xu, Jimmy Ba, Ryan Kiros, Kyunghyun Cho, Aaron Courville, Ruslan Salakhudinov, Rich Zemel, and Yoshua Bengio. 2015. Show, attend and tell: Neural image caption generation with visual attention. In Proceedings of the International Conference on Machine Learning, pages 2048-2057.", |
|
"links": null |
|
}, |
|
"BIBREF91": { |
|
"ref_id": "b91", |
|
"title": "Exploring the efficacy of automatically generated counterfactuals for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Linyi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiazheng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P\u00e1draig", |
|
"middle": [], |
|
"last": "Cunningham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Smyth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruihai", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "306--316", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linyi Yang, Jiazheng Li, P\u00e1draig Cunningham, Yue Zhang, Barry Smyth, and Ruihai Dong. 2021. Ex- ploring the efficacy of automatically generated coun- terfactuals for sentiment analysis. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 306-316.", |
|
"links": null |
|
}, |
|
"BIBREF92": { |
|
"ref_id": "b92", |
|
"title": "Representer point selection for explaining deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Chih-Kuan", |
|
"middle": [], |
|
"last": "Yeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "En-Hsu Yen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pradeep K", |
|
"middle": [], |
|
"last": "Ravikumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chih-Kuan Yeh, Joon Kim, Ian En-Hsu Yen, and Pradeep K Ravikumar. 2018. Representer point selec- tion for explaining deep neural networks. Advances in Neural Information Processing Systems, 31.", |
|
"links": null |
|
}, |
|
"BIBREF93": { |
|
"ref_id": "b93", |
|
"title": "Fairness in decision-making-the causal explanation formula", |
|
"authors": [ |
|
{ |
|
"first": "Junzhe", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elias", |
|
"middle": [], |
|
"last": "Bareinboim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junzhe Zhang and Elias Bareinboim. 2018. Fairness in decision-making-the causal explanation formula. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32.", |
|
"links": null |
|
}, |
|
"BIBREF94": { |
|
"ref_id": "b94", |
|
"title": "Adversarial attacks on deeplearning models in natural language processing: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [ |
|
"Emma" |
|
], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quan Z", |
|
"middle": [], |
|
"last": "Sheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahoud", |
|
"middle": [], |
|
"last": "Alhazmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenliang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ACM Transactions on Intelligent Systems and Technology (TIST)", |
|
"volume": "11", |
|
"issue": "3", |
|
"pages": "1--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Emma Zhang, Quan Z Sheng, Ahoud Alhazmi, and Chenliang Li. 2020a. Adversarial attacks on deep- learning models in natural language processing: A survey. ACM Transactions on Intelligent Systems and Technology (TIST), 11(3):1-41.", |
|
"links": null |
|
}, |
|
"BIBREF95": { |
|
"ref_id": "b95", |
|
"title": "why should you trust my explanation?\" understanding uncertainty in LIME explanations", |
|
"authors": [ |
|
{ |
|
"first": "Yujia", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuangyan", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madeleine", |
|
"middle": [], |
|
"last": "Udell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the ICML Workshop AI for Social Good", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yujia Zhang, Kuangyan Song, Yiming Sun, Sarah Tan, and Madeleine Udell. 2019. \"why should you trust my explanation?\" understanding uncertainty in LIME explanations. In Proceedings of the ICML Workshop AI for Social Good.", |
|
"links": null |
|
}, |
|
"BIBREF96": { |
|
"ref_id": "b96", |
|
"title": "Effect of confidence and explanation on accuracy and trust calibration in AI-assisted decision making", |
|
"authors": [ |
|
{ |
|
"first": "Yunfeng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q Vera", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel KE", |
|
"middle": [], |
|
"last": "Bellamy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "295--305", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunfeng Zhang, Q Vera Liao, and Rachel KE Bellamy. 2020b. Effect of confidence and explanation on ac- curacy and trust calibration in AI-assisted decision making. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency, pages 295-305.", |
|
"links": null |
|
}, |
|
"BIBREF97": { |
|
"ref_id": "b97", |
|
"title": "Gender bias in contextualized word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianlu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Tianlu Wang, Mark Yatskar, Ryan Cotterell, Vicente Ordonez, and Kai-Wei Chang. 2019. Gender bias in contextualized word embeddings. In Proceed- ings of the 2019 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, volume 1.", |
|
"links": null |
|
}, |
|
"BIBREF98": { |
|
"ref_id": "b98", |
|
"title": "Hildif: Interactive debugging of NLI models using influence functions", |
|
"authors": [ |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Zylberajch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyawat", |
|
"middle": [], |
|
"last": "Lertvittayakumjorn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francesca", |
|
"middle": [], |
|
"last": "Toni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Interactive Learning for Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hugo Zylberajch, Piyawat Lertvittayakumjorn, and Francesca Toni. 2021. Hildif: Interactive debugging of NLI models using influence functions. In Proceed- ings of the First Workshop on Interactive Learning for Natural Language Processing, pages 1-6.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"text": "Explainability methods from Sec. 2 categorized as local vs. global and self-explaining vs. post-hoc.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "Summary of the studies that apply explainability techniques to uncover unintended biases in NLP systems.", |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |