|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:52:44.284914Z" |
|
}, |
|
"title": "Attributing Fair Decisions with Attention Interventions", |
|
"authors": [ |
|
{ |
|
"first": "Ninareh", |
|
"middle": [], |
|
"last": "Mehrabi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Umang", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Fred", |
|
"middle": [], |
|
"last": "Morstatter", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"Ver" |
|
], |
|
"last": "Steeg", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Aram", |
|
"middle": [], |
|
"last": "Galstyan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The widespread use of Artificial Intelligence (AI) in consequential domains, such as healthcare and parole decision-making systems, has drawn intense scrutiny on the fairness of these methods. However, ensuring fairness is often insufficient as the rationale for a contentious decision needs to be audited, understood, and defended. We propose that the attention mechanism can be used to ensure fair outcomes while simultaneously providing feature attributions to account for how a decision was made. Toward this goal, we design an attention-based model that can be leveraged as an attribution framework. It can identify features responsible for both performance and fairness of the model through attention interventions and attention weight manipulation. Using this attribution framework, we then design a post-processing bias mitigation strategy and compare it with a suite of baselines. We demonstrate the versatility of our approach by conducting experiments on two distinct data types, tabular and textual.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The widespread use of Artificial Intelligence (AI) in consequential domains, such as healthcare and parole decision-making systems, has drawn intense scrutiny on the fairness of these methods. However, ensuring fairness is often insufficient as the rationale for a contentious decision needs to be audited, understood, and defended. We propose that the attention mechanism can be used to ensure fair outcomes while simultaneously providing feature attributions to account for how a decision was made. Toward this goal, we design an attention-based model that can be leveraged as an attribution framework. It can identify features responsible for both performance and fairness of the model through attention interventions and attention weight manipulation. Using this attribution framework, we then design a post-processing bias mitigation strategy and compare it with a suite of baselines. We demonstrate the versatility of our approach by conducting experiments on two distinct data types, tabular and textual.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Machine learning algorithms that optimize for performance (e.g., accuracy) often result in unfair outcomes (Mehrabi et al., 2021) . These algorithms capture biases present in the training datasets causing discrimination toward different groups. As machine learning continues to be adopted into fields where discriminatory treatments can lead to legal penalties, fairness and interpretability have become a necessity and a legal incentive in addition to an ethical responsibility (Barocas and Selbst, 2016; Hacker et al., 2020) . Existing methods for fair machine learning include applying complex transformations to the data so that resulting representations are fair (Gupta et al., 2021; Moyer et al., 2018; Roy and Boddeti, 2019; Jaiswal et al., 2020; Song et al., 2019) , adding regularizers to incorporate fairness (Zafar et al., 2017; Kamishima et al., 2012; Mehrabi et al., 2020) , or modifying the outcomes of unfair machine learning algorithms to ensure fairness (Hardt et al., 2016) , among others. Here we present an alternative approach, which works by identifying the significance of different features in causing unfairness and reducing their effect on the outcomes using an attention-based mechanism.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 129, |
|
"text": "(Mehrabi et al., 2021)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 505, |
|
"text": "(Barocas and Selbst, 2016;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 526, |
|
"text": "Hacker et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 688, |
|
"text": "(Gupta et al., 2021;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 708, |
|
"text": "Moyer et al., 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 709, |
|
"end": 731, |
|
"text": "Roy and Boddeti, 2019;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 732, |
|
"end": 753, |
|
"text": "Jaiswal et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 754, |
|
"end": 772, |
|
"text": "Song et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 819, |
|
"end": 839, |
|
"text": "(Zafar et al., 2017;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 840, |
|
"end": 863, |
|
"text": "Kamishima et al., 2012;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 864, |
|
"end": 885, |
|
"text": "Mehrabi et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 971, |
|
"end": 991, |
|
"text": "(Hardt et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With the advancement of transformer models and the attention mechanism (Vaswani et al., 2017) , recent research in Natural Language Processing (NLP) has tried to analyze the effects and the interpretability of the attention weights on the decision making process (Wiegreffe and Pinter, 2019; Jain and Wallace, 2019; Serrano and Smith, 2019; Hao et al., 2021) . Taking inspiration from these works, we propose to use an attention-based mechanism to study the fairness of a model. The attention mechanism provides an intuitive way to capture the effect of each attribute on the outcomes. Thus, by introducing the attention mechanism, we can analyze the effect of specific input features on the model's fairness. We form visualizations that explain model outcomes and help us decide which attributes contribute to accuracy vs. fairness. We also show and confirm the observed effect of indirect discrimination in previous work (Zliobaite, 2015; Hajian and Domingo-Ferrer, 2013; Zhang et al., 2017) in which even with the absence of the sensitive attribute, we can still have an unfair model due to the existence of proxy attributes. Furthermore, we show that in certain scenarios those proxy attributes contribute more to the model unfairness than the sensitive attribute itself.", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 93, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 143, |
|
"end": 148, |
|
"text": "(NLP)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 263, |
|
"end": 291, |
|
"text": "(Wiegreffe and Pinter, 2019;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 315, |
|
"text": "Jain and Wallace, 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 340, |
|
"text": "Serrano and Smith, 2019;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 358, |
|
"text": "Hao et al., 2021)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 923, |
|
"end": 940, |
|
"text": "(Zliobaite, 2015;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 941, |
|
"end": 973, |
|
"text": "Hajian and Domingo-Ferrer, 2013;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 974, |
|
"end": 993, |
|
"text": "Zhang et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Based on the above observations, we propose a post-processing bias mitigation technique by diminishing the weights of features most responsible for causing unfairness. We perform studies on datasets with different modalities and show the flexibility of our framework on both tabu-lar and large-scale text data, which is an advantage over existing interpretable non-neural and non-attention-based models. Furthermore, our approach provides a competitive and interpretable baseline compared to several recent fair learning techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section, we describe our classification model that incorporates the attention mechanism. It can be applied to both text and tabular data and is inspired by works in attention-based models in text-classification (Zhou et al., 2016) . We incorporate attention over the input features. Next, we describe how this attention over features can attribute the model's unfairness to certain features. Finally, using this attribution framework, we propose a post-processing approach for mitigating unfairness.", |
|
"cite_spans": [ |
|
{ |
|
"start": 219, |
|
"end": 238, |
|
"text": "(Zhou et al., 2016)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this work, we focus on binary classification tasks. We assume access to a dataset of triplets", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "D = {x i , y i , a i } N i=1 , where x i , y i , a i are i.i.d. samples from data distribution p(x, y, a). a \u2208 {a 1 , . . . a l }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "is a discrete variable with l possible values and denotes the sensitive or protected attributes with respect to which we want to be fair, y \u2208 {0, 1} is the true label, x \u2208 R m are features of the sample which may include sensitive attributes. We use\u0177 o to denote the binary outcome of the original model, and\u0177 k z will represent the binary outcome of a model in which the attention weights corresponding to k th feature are zeroed out. Our framework is flexible and general that it can be used to find attribution for any fairness notion. More particularly, we work with the group fairness measures like Statistical Parity (Dwork et al., 2012) , Equalized Odds (Hardt et al., 2016) , and Equality of Opportunity (Hardt et al., 2016) , which are defined as: 1 Statistical Parity Difference (SPD):", |
|
"cite_spans": [ |
|
{ |
|
"start": 623, |
|
"end": 643, |
|
"text": "(Dwork et al., 2012)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 661, |
|
"end": 681, |
|
"text": "(Hardt et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 732, |
|
"text": "(Hardt et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "SPD(\u0177, a) = max a i ,a j |P (\u0177 = 1 | a = a i ) \u2212P (\u0177 = 1 | a = a j )|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Equality of Opportunity Difference (EqOpp):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EqOpp(\u0177, a, y) = max a i ,a j |P (\u0177 = 1 | a = ai, y = 1) \u2212P (\u0177 = 1 | a = aj, y = 1)|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Figure 1: (a) In general classification model, for each feature f k a vector representation e k of length d e is learned. This is passed to the attention layer which produces a d e -dimensional vector representation for the sample instance i which is passed to two dense layers to get the final classification output. (b) The Attribution framework has the same architecture as the general model. One outcome is obtained through the original model and another through the model that has some attention weights zeroed. The observed difference in accuracy and fairness measures will indicate the effect of the zeroed out features on accuracy and fairness.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Equalized Odds Difference (EqOdd):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EqOdd(\u0177, a, y) = max a i ,a j max y\u2208{0,1} |P (\u0177 = 1 | a = ai, y = y) \u2212P (\u0177 = 1 | a = aj, y = y)|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "2" |
|
}, |
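To make the definitions above concrete, here is a minimal NumPy sketch of the three group-fairness measures, assuming 1-D arrays of binary predictions y_hat, sensitive-group labels a, and true labels y; the paper uses the Fairlearn implementations (see footnote 1), so these helper names and signatures are only illustrative.

```python
import numpy as np

def spd(y_hat, a):
    """Statistical parity difference: largest gap in positive-prediction rates across groups."""
    rates = [y_hat[a == g].mean() for g in np.unique(a)]
    return max(rates) - min(rates)

def eq_opp(y_hat, a, y):
    """Equality of opportunity difference: SPD restricted to samples with true label y = 1."""
    mask = y == 1
    return spd(y_hat[mask], a[mask])

def eq_odd(y_hat, a, y):
    """Equalized odds difference: worst-case gap over both values of the true label."""
    return max(spd(y_hat[y == label], a[y == label]) for label in (0, 1))
```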
|
{ |
|
"text": "We consider each feature value as an individual entity (like the words are considered in textclassification) and learn a fixed-size embedding {e k } m k=1 , e k \u2208 R d e for each feature, {f k } m k=1 . These vectors are passed to the attention layer. The Computation of attention weights and the final representation for a sample is described in Eq. 1. E = [e 1 . . . e m ], E \u2208 R d e \u00d7m is the concatenation of all the embeddings, w \u2208 R d e is a learnable parameter, r \u2208 R d e denotes the overall sample representation, and \u03b1 \u2208 R m denotes the attention weights.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "General Model: Incorporating Attention over Inputs in Classifiers", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "H = tanh(E); \u03b1 = softmax(w T H); r = tanh(E\u03b1 T ) (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "General Model: Incorporating Attention over Inputs in Classifiers", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The resulting representation, r, is passed to the feed-forward layers for classification. In this work, we have used two feed-forward layers (See Fig. 1 for overall architecture).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 152, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "General Model: Incorporating Attention over Inputs in Classifiers", |
|
"sec_num": "2.1" |
|
}, |
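The following is a minimal PyTorch sketch of this classifier, assuming integer-coded (discretized) tabular features; the class name, dimensions, and the attn_scale hook (used later for the attention interventions) are our own choices rather than the paper's released code.

```python
import torch
import torch.nn as nn

class AttentionOverFeatures(nn.Module):
    """One embedding per feature value, attention over the m feature embeddings (Eq. 1),
    followed by two dense layers, as described in Sec. 2.1."""
    def __init__(self, cardinalities, d_e=32, d_hidden=64, n_classes=2):
        super().__init__()
        # cardinalities[k] = number of distinct values of feature k.
        self.embeddings = nn.ModuleList(nn.Embedding(c, d_e) for c in cardinalities)
        self.w = nn.Parameter(torch.randn(d_e))  # learnable attention query vector
        self.classifier = nn.Sequential(nn.Linear(d_e, d_hidden), nn.ReLU(),
                                        nn.Linear(d_hidden, n_classes))

    def forward(self, x, attn_scale=None):
        # x: (batch, m) integer-coded features; attn_scale: optional (m,) multiplier
        # used for attribution/mitigation (1 keeps a weight, 0 removes a feature).
        E = torch.stack([emb(x[:, k]) for k, emb in enumerate(self.embeddings)], dim=-1)  # (B, d_e, m)
        H = torch.tanh(E)
        alpha = torch.softmax(torch.einsum("e,bem->bm", self.w, H), dim=-1)               # (B, m)
        if attn_scale is not None:
            alpha = alpha * attn_scale                                                    # attention intervention
        r = torch.tanh(torch.einsum("bem,bm->be", E, alpha))                              # (B, d_e)
        return self.classifier(r), alpha
```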
|
{ |
|
"text": "The aforementioned classification model with the attention mechanism combines input feature em-Algorithm 1: Bias Mitigation by Attention", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness Attribution with Attention", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "1 Input: decay rate d r (0 \u2264 d r < 1), n test", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness Attribution with Attention", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "samples indexed by variable i.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness Attribution with Attention", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "2 Output: final predictions, unfair features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness Attribution with Attention", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "3 Calculate the attention weights \u03b1 ki for k th feature in sample i using the attention layer as in Eq. 1. return\u0176 , unfair_feature_set beddings by taking a weighted combination. By manipulating the weights, we can intuitively capture the effects of specific features on the output. To this end, we observe the effect of each attribute on the fairness of outcomes by zeroing out or reducing its attention weights and recording the change. Other works have used similar ideas to understand the effect of attention weights on accuracy and evaluate interpretability of the attention weights by comparing the difference in outcomes in terms of measures such as Jensen-Shannon Divergence (Serrano and Smith, 2019) but not for fairness. We are interested in the effect of features on fairness measures. Thus, we measure the difference in fairness of the outcomes based on the desired fairness measure. A large change in fairness measure and a small change in performance of the model would indicate that this feature is mostly responsible for unfairness, and it can be dropped without causing large impacts on performance. The overall framework is shown in Fig. 1 . First, the outcomes are recorded with the original attention weights intact (Fig. 1a) . Next, attention weights corresponding to a particular feature are zeroed out, and the difference in performance and fairness measures is recorded (Fig. 1b) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1151, |
|
"end": 1157, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1236, |
|
"end": 1245, |
|
"text": "(Fig. 1a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1394, |
|
"end": 1403, |
|
"text": "(Fig. 1b)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fairness Attribution with Attention", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "4 unfair_feature_set = {} 5 for each feature (index) k do 6 if SPD(\u0177 o , a) \u2212 SPD(\u0177 k z , a) \u2265", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness Attribution with Attention", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Based on the observed differences, one may conclude how incorporating this feature contributes to fairness/unfairness. To measure the effect of the k th feature on different fairness measures, we consider the difference in the fairness of outcomes of the original model and model with k th feature's effect removed. For example, for statistical parity difference, we will consider SPD(\u0177 o , a) \u2212 SPD(\u0177 k z , a). A negative value will indicate that the k th feature helps mitigate unfairness, and a positive value will indicate that the k th feature contributes to unfairness. This is because\u0177 k z captures the exclusion of the k th feature (zeroed out attention weight for that feature) from the decision-making process. If the value is positive, it indicates that not having this feature makes the bias lower than when we include it. Notice here, we focus on global attribution, so we measure this over all the samples; however, this can also be turned into local attribution by focusing on individual sample i only.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fairness Attribution with Attention", |
|
"sec_num": "2.2" |
|
}, |
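A sketch of this attribution procedure is given below, building on the spd helper and the attention classifier sketched earlier (both our own illustrative constructions); x is assumed to be a (batch, m) integer tensor and a a NumPy array of sensitive-attribute values.

```python
import torch

def attribute_fairness(model, x, a, fairness_fn=spd):
    """Global attribution sketch: for each feature k, zero its attention weight and record
    SPD(y_o, a) - SPD(y^k_z, a). A positive value means excluding feature k lowers the bias,
    i.e. the feature contributes to unfairness."""
    num_features = x.shape[1]
    with torch.no_grad():
        y_o = model(x)[0].argmax(dim=-1).numpy()
        base = fairness_fn(y_o, a)
        deltas = []
        for k in range(num_features):
            scale = torch.ones(num_features)
            scale[k] = 0.0                                    # zero out attention on feature k
            y_kz = model(x, attn_scale=scale)[0].argmax(dim=-1).numpy()
            deltas.append(base - fairness_fn(y_kz, a))
    return deltas
```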
|
{ |
|
"text": "As discussed in the previous section, we can identify features that contribute to unfair outcomes according to different fairness measures. A simple technique to mitigate or reduce bias is to reduce the attention weights of these features. This mitigation technique is outlined in Algorithm 1. In this algorithm, we first individually set attention weights for each of the features in all the samples to zero and monitor the effect on the desired fairness measure. We have demonstrated the algorithm for SPD, but other measures, such as EqOdd, EqOpp, and even accuracy can be used (in which case the \"unfair_feature_set\" can be re-named to feature set which harms accuracy instead of fairness). If the k th feature contributes to unfairness, we reduce its attention weight using decay rate value. This is because\u0177 k z captures the exclusion of the k th feature (zeroed attention weight for that feature) compared to the original outcome\u0177 o for when all the feature weights are intact; otherwise, we use the original attention weight. We can also control the fairness-accuracy trade-off by putting more attention weight on features that boost accuracy while keeping the fairness of the model the same and down-weighting features that hurt accuracy, fairness, or both.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Mitigation by Removing Unfair Features", |
|
"sec_num": "2.3" |
|
}, |
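A minimal sketch of this post-processing mitigation, reusing the attribute_fairness and spd sketches above; we use a zero threshold on the SPD difference since the exact threshold of Algorithm 1 is not shown in this parse, and decay_rate = 0 removes the flagged features entirely.

```python
import torch

def mitigate_bias(model, x, a, decay_rate=0.5, fairness_fn=spd):
    """Down-weight the attention of features whose exclusion reduces bias, then re-predict."""
    deltas = attribute_fairness(model, x, a, fairness_fn)    # per-feature fairness attributions
    unfair_features = [k for k, d in enumerate(deltas) if d > 0]
    scale = torch.ones(x.shape[1])
    for k in unfair_features:
        scale[k] = decay_rate                                 # decay instead of the learned weight
    with torch.no_grad():
        predictions = model(x, attn_scale=scale)[0].argmax(dim=-1)
    return predictions, unfair_features
```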
|
{ |
|
"text": "This post-processing technique has a couple of advantages over previous works in bias mitigation or fair classification approaches. First, the postprocessing approach is computationally efficient as it does not require model retraining to ensure fairness for each sensitive attribute separately. Instead, the model is trained once by incorporating all the attributes, and then one manipulates attention weights during test time according to particular needs and use-cases. Second, the proposed mitigation method provides an explanation and can control the fairness-accuracy trade-off. This is because manipulating the attention weights reveals which features are important for getting the desired outcome, and by how much. This provides an explanation for the outcome and also a mechanism to control the fairness-accuracy trade-off by the amount of the manipulation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Mitigation by Removing Unfair Features", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We perform a suite of experiments on synthetic and real-world datasets to evaluate our attention based interpretable fairness framework. The experiments on synthetic data are intended to elucidate interpretability in controlled settings, where we can manipulate the relations between input and output feature. The experiments on real-world data aim to validate the effectiveness of the proposed approach on both tabular and non-tabular (textual) data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We enumerate the experiments and their goals as follows: Experiment 1: Attributing Fairness with Attention The purpose of this experiment is to demonstrate that our attribution framework can capture correct attributions of features to fairness outcomes. We present our results for tabular data in Sec. 4.1. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Types of Experiments", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To validate the attribution framework, we created two synthetic datasets in which we control how features interact with each other and contribute to the accuracy and fairness of the outcome variable. These datasets capture some of the common scenarios, namely the data imbalance (skewness) and indirect discrimination issues, arising in fair decision or classification problems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthetic Data", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Scenario 1: First, we create a simple scenario to demonstrate that our framework identifies correct feature attributions for fairness and accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthetic Data", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "We create a feature that is correlated with the outcome (responsible for accuracy), a discrete feature that causes the prediction outcomes to be biased (responsible for fairness), and a continuous feature that is independent of the label or the task (irrelevant for the task). For intuition, suppose the attention-based attribution framework works correctly. In this case, we expect to see a reduction in accuracy upon removing (i.e., making the attention weight zero) the feature responsible for the accuracy, reduction in bias upon removing the feature responsible for bias, and very little or no change upon removing the irrelevant feature. With this objective, we generated a synthetic dataset with three features, i.e., x = [f 1 , f 2 , f 3 ] as follows 2 :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthetic Data", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "f 1 \u223c Ber(0.9) f 2 \u223c Ber(0.5) f 3 \u223cN (0, 1) y \u223c Ber(0.9) if f 2 = 1 Ber(0.1) if f 2 = 0", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthetic Data", |
|
"sec_num": "3.2.1" |
|
}, |
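For reference, a short NumPy sketch of this Scenario 1 generative process; the sample size and seed are our own choices.

```python
import numpy as np

def make_scenario1(n=10_000, seed=0):
    """Scenario 1: skewed sensitive attribute f1, predictive feature f2, irrelevant feature f3."""
    rng = np.random.default_rng(seed)
    f1 = rng.binomial(1, 0.9, n)                         # imbalanced sensitive attribute, independent of y
    f2 = rng.binomial(1, 0.5, n)                         # predictive feature
    f3 = rng.normal(0.0, 1.0, n)                         # irrelevant feature
    y = rng.binomial(1, np.where(f2 == 1, 0.9, 0.1))     # y ~ Ber(0.9) if f2 = 1, Ber(0.1) otherwise
    return np.stack([f1, f2, f3], axis=1), y
```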
|
{ |
|
"text": "Clearly, f 2 has the most predictive information for the task and is responsible for accuracy. Here, we consider f 1 as the sensitive attribute. f 1 is an imbalanced feature that can bias the outcome and is generated such that there is no intentional correlation between f 1 and the outcome, y or f 2 . f 3 is sampled from a normal distribution independent of the outcome y, or the other features, making it irrelevant for the task. Thus, an ideal classifier would be fair if it captures the correct outcome without being affected by the imbalance in f 1 . However, due to limited data and skew in f 1 , there will be some undesired bias -few errors when f 1 = 0 can lead to large statistical parity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthetic Data", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Scenario 2: Using features that are not identified as sensitive attributes can result in unfair decisions due to their implicit relations or correlations with the sensitive attributes. This phenomenon is called indirect discrimination (Zliobaite, 2015; Hajian and Domingo-Ferrer, 2013; Zhang et al., 2017) . We designed this synthetic dataset to demonstrate and characterize the behavior of our framework under indirect discrimination. Similar to the previous scenario, we consider three features. Here, f 1 is considered as the sensitive attribute, and f 2 is correlated with f 1 and the outcome, y. The generative process is as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 252, |
|
"text": "(Zliobaite, 2015;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 285, |
|
"text": "Hajian and Domingo-Ferrer, 2013;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 305, |
|
"text": "Zhang et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthetic Data", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "f 1 \u223c Ber(0.9) if f 2 = 1 Ber(0.1) if f 2 = 0 f 2 \u223c Ber(0.5) f 3 \u223c N (0, 1) y \u223c Ber(0.7) if f 2 = 1 Ber(0.3) if f 2 = 0", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthetic Data", |
|
"sec_num": "3.2.1" |
|
}, |
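A matching sketch for Scenario 2, again with sample size and seed as our own choices; the only change is that the sensitive attribute f1 now depends on the predictive feature f2, producing the proxy relation behind indirect discrimination.

```python
import numpy as np

def make_scenario2(n=10_000, seed=0):
    """Scenario 2: f1 is correlated with f2, which also drives the label y."""
    rng = np.random.default_rng(seed)
    f2 = rng.binomial(1, 0.5, n)
    f1 = rng.binomial(1, np.where(f2 == 1, 0.9, 0.1))    # sensitive attribute correlated with f2
    f3 = rng.normal(0.0, 1.0, n)
    y = rng.binomial(1, np.where(f2 == 1, 0.7, 0.3))     # label also driven by f2
    return np.stack([f1, f2, f3], axis=1), y
```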
|
{ |
|
"text": "In this case f 1 and y are correlated with f 2 . The model should mostly rely on f 2 for its decisions. However, due to the correlation between f 1 and f 2 , we expect f 2 to affect both the accuracy and fairness of the model. Thus, in this case, indirect discrimination is possible. Using such a synthetic dataset, we demonstrate a) indirect discrimination and b) the need to have an attribution framework to reason about unfairness and not blindly focus on the sensitive attributes for bias mitigation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Synthetic Data", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "We demonstrate our approach on the following real-world datasets: Tabular Datasets: We conduct our experiments on two real-world tabular datasets -UCI Adult (Dua and Graff, 2017) and Heritage Health 3 datasets. The UCI Adult dataset contains census information about individuals, with the prediction task being whether the income of the individual is higher than $50k or not. The sensitive attribute, in this case, is gender (male/female). The Heritage Health dataset contains patient information, and the task is to predict the Charleson Index (comorbidity index, which is a patient survival indicator). Each patient is grouped into one of the 9 possible age groups, and we consider this as the sensitive attribute. We used the same pre-processing and train-test splits as in Gupta et al. (2021) . Non-Tabular or Text Dataset: We also experiment with a non-tabular, text dataset. We used the biosbias dataset (De-Arteaga et al., 2019). The dataset contains short bios of individuals. The task is to predict the occupation of the individual from their bio. We utilized the bios from the year 2018 from the 2018_34 archive and considered two occupations for our experiments, namely, nurse and dentist. The dataset was split into 70-15-15 train, validation, and test splits. De-Arteaga et al. (2019) has demonstrated the existence of gender bias in this prediction task and showed that certain gender words are associated with certain job types (e.g., she to nurse and he to dentist).", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 178, |
|
"text": "(Dua and Graff, 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 777, |
|
"end": 796, |
|
"text": "Gupta et al. (2021)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1273, |
|
"end": 1297, |
|
"text": "De-Arteaga et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Real-world Datasets", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "For our baselines, we consider methods that learn representations of data so that information about sensitive attributes is eliminated. CVIB (Moyer et al., 2018) realizes this objective through a conditional variational autoencoder, whereas MIFR (Song et al., 2019 ) uses a combination of information bottleneck term and adversarial learning to optimize the fairness objective. FCRL (Gupta et al., 2021) optimizes information theoretic objectives that can be used to achieve good trade-offs between fairness and accuracy by using specialized contrastive information estimators. In addition to information-theoretic approaches, we also considered baselines that use adversarial learning such as MaxEnt-ARL (Roy and Boddeti, 2019), LAFTR (Madras et al., 2018) , and Adversarial Forgetting (Jaiswal et al., 2020) . Note that in contrast to our approach, the baselines described above are not interpretable as they are incapable of directly attributing features to fairness outcomes. For the textual data, we compare our approach with the debiasing technique proposed in De-Arteaga et al. 2019, which works by masking the gender-related words and then training the model on this masked data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 161, |
|
"text": "(Moyer et al., 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 246, |
|
"end": 264, |
|
"text": "(Song et al., 2019", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 383, |
|
"end": 403, |
|
"text": "(Gupta et al., 2021)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 736, |
|
"end": 757, |
|
"text": "(Madras et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 809, |
|
"text": "(Jaiswal et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bias Mitigation Baselines", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "First, we test our method's ability to capture correct attributions in controlled experiments with synthetic data (described in Sec. 3.2.1). We also conduct a similar experiment with UCI Adult and Heritage Health datasets which can be found in the appendix. Fig. 2 summarizes our results by visualizing the attributions, which we now discuss.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 264, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Attributing Fairness with Attention", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In Scenario 1, as expected, f 2 is correctly attributed to being responsible for the accuracy and removing it hurts the accuracy drastically. Similarly, f 1 is correctly shown to be responsible for unfairness and removing it creates a fairer outcome. Ideally, the model should not be using any information about f 1 as it is independent of the task, but it does. Therefore, by removing f 1 , we can ensure that information is not used and hence outcomes are fair. Lastly, as expected, f 3 was the irrelevant feature, and its effects on accuracy and fairness are negligible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attributing Fairness with Attention", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In Scenario 2, our framework captures the effect of indirect discrimination. We can see that removing f 2 reduces bias as well as accuracy drastically. This is because f 2 is the predictive feature, but due to its correlation with f 1 , it can also indirectly affect the model's fairness. More interestingly, although f 1 is the sensitive feature, removing it does not play a drastic role in fairness or the accuracy. This is an important finding as it shows why removing f 1 on its own can not give us a fairer model due to the existence of correlations to other features and indirect discrimination. Overall, our results are intuitive and thus validate our assumption that attention-based framework can provide reliable feature attributions for the fairness and accuracy of the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attributing Fairness with Attention", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "As we have highlighted earlier, understanding how the information within features interact and contribute to the decision making can be used to design effective bias mitigation strategies. One such example was shown in Sec. 4.1. Often realworld datasets have features which cause indirect discrimination, due to which fairness can not be achieved by simply eliminating the sensitive feature from the decision process. Using the attributions derived from our attention-based attribution framework, we propose a post-processing mitigation strategy. Our strategy is to intervene on attention weights as discussed in Sec. 2.3. We first attribute and identify the features responsible for the unfairness of the outcomes, i.e., all the features whose exclusion will decrease the bias compared to the original model's outcomes and gradually decrease their attention weights to zero as also outlined in Algorithm 1. We do this by first using the whole fraction of the attention weights learned and gradually use less fraction of the weights until the weights are completely zeroed out. For all the baselines described in Sec. 3.3, we used the approach outlined in Gupta et al. (2021) for training a downstream classifier and evaluating the accuracy/fairness trade-offs. The downstream classifier was a 1-hidden-layer MLP with 50 neurons along with ReLU activation function. Each method was trained with five different seeds, and we report the average accuracy and fairness measure as statistical parity difference (SPD). Results for other fairness notions can be found in the appendix. CVIB, MaxEnt-ARL, Adversarial Forgetting and FCRL are designed for statistical parity notion of fairness and are not applicable for other measures like Equalized Odds and Equality of Opportunity. LAFTR can only deal with binary sensitive attributes and thus not applicable for Heritage Health dataset. Notice that our approach does not have these limitations. For our approach, we vary the attention weights and report the resulting fairness-accuracy trade offs. Fig. 3 compares fairness-accuracy trade-offs of different bias mitigation approaches. We desire outcomes to be fairer, i.e., lower values of SPD and to be more accurate, i.e., towards the right. The results show that using attention attributions can indeed be beneficial for reducing bias. Moreover, our mitigation framework based on the ma- nipulation of the attention weights is competitive with state-of-the-art mitigation strategies. However, most of these approaches are specifically designed and optimized to achieve parity and do not provide any interpretability. Our model can not only achieve comparable and competitive results, but it is also able to provide explanation such that the users exactly know what feature and by how much it was manipulated to get the corresponding outcome. Another advantage of our model is that it needs only one round of training. The adjustments to attention weights are made post-training; thus, it is possible to achieve different trade-offs. Moreover, our approach does not need to know sensitive attributes while training; thus, it could work with other sensitive attributes not known beforehand or during training. Lastly, here we merely focused on mitigating bias as our goal was to show that the attribution framework can identify problematic features and their removal would result in bias mitigation. We manipulated attention weights of all the features that contributed to unfairness irrespective of if they helped maintaining high accuracy or not. 
However, the trade-off results can be improved by carefully considering the trade-off each feature contributes to with regards to both accuracy and fairness to achieve better trade-off results which can be investigated as a future direction. The advantage of our work is that this trade-off curve can be controlled by controlling how many features and by how much to be manipulated.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1156, |
|
"end": 1175, |
|
"text": "Gupta et al. (2021)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 2041, |
|
"end": 2047, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Attention as a Mitigation Technique", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In addition to providing interpretability, our approach is flexible and useful for controlling fairness in modalities other than tabular datasets. To put this to the test, we applied our model to mitigate bias in text-based data. We consider the biosbias dataset (De-Arteaga et al., 2019) , and use our mitigation technique to reduce observed biases in the classification task performed on this dataset. We compare our approach with the debiasing technique proposed in the original paper (De-Arteaga et al., 2019) , which works by masking the gender-related words and then training the model on this masked data. As discussed earlier, such a method is computationally inefficient. It requires re-training the model or creating a new masked dataset, each time it is required to debias the model against different attributes, such as gender vs. race. For the baseline preprocessing method, we masked the gender-related words, such as names and gender words, as provided in the biosbias dataset and trained the model on the filtered dataset. On the other hand, we trained the model on the raw bios for our postprocessing method and only manipulated attention weights of the gender words during the testing process as also provided in the biosbias dataset. In order to measure the bias, we used the same measure as in (De-Arteaga et al., 2019) which is based on the equality of opportunity notion of fairness (Hardt et al., 2016) and reported the True Positive Rate Difference (TPRD) for each occupation amongst different genders. As shown in Table 1 , our post-processing mitigation technique provides lower TRPD while being more accurate, followed by the technique that masks the gendered words before training. Although both methods reduce the bias compared to a model trained on raw bios without applying any mask or invariance to gendered words, our post-processing method Table 1 : Difference of the True Positive Rates (TPRD) amongst different genders for the dentist and nurse occupations on the biosbias dataset. Our introduced post-processing method is the most effective in reducing the disparity for both occupations compared to the pre-processing technique. is more effective. Fig. 4 also highlights qualitative differences between models in terms of their most attentive features for the prediction task. As shown in the results, our post-processing technique is able to use more meaningful words, such as R.N. (registered nurse) to predict the outcome label nurse compared to both baselines, while the non-debiased model focuses on gendered words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 263, |
|
"end": 288, |
|
"text": "(De-Arteaga et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 513, |
|
"text": "(De-Arteaga et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1314, |
|
"end": 1339, |
|
"text": "(De-Arteaga et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1405, |
|
"end": 1425, |
|
"text": "(Hardt et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1539, |
|
"end": 1546, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1874, |
|
"end": 1881, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2186, |
|
"end": 2192, |
|
"text": "Fig. 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments with Non-Tabular Data", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Fairness. The research in fairness concerns itself with various topics (Mehrabi et al., 2021) . In this work, we utilized different metrics that were introduced previously (Dwork et al., 2012; Hardt et al., 2016) , to measure the amount of bias. We also used different bias mitigation strategies to compare against our mitigation strategy, such as FCRL (Gupta et al., 2021) , CVIB (Moyer et al., 2018) , MIFR (Song et al., 2019) , adversarial forgetting (Jaiswal et al., 2020) , MaxEnt-ARL (Roy and Boddeti, 2019), and LAFTR (Madras et al., 2018) . We also utilized concepts and datasets that were analyzing existing biases in NLP systems, such as (De-Arteaga et al., 2019) which studied the existing biases in NLP systems on the occupation classification task on the bios dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 93, |
|
"text": "(Mehrabi et al., 2021)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 192, |
|
"text": "(Dwork et al., 2012;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 212, |
|
"text": "Hardt et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 373, |
|
"text": "(Gupta et al., 2021)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 401, |
|
"text": "(Moyer et al., 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 428, |
|
"text": "(Song et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 454, |
|
"end": 476, |
|
"text": "(Jaiswal et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 525, |
|
"end": 546, |
|
"text": "(Madras et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Interpretability. There is a body of work in NLP literature that tried to analyze the effect of the attention weights on interpretability of the model (Wiegreffe and Pinter, 2019; Jain and Wallace, 2019; Serrano and Smith, 2019) . Other work also utilized attention weights to define an attribution score to be able to reason about how transformer models such as BERT work (Hao et al., 2021) . Notice that although Jain and Wallace (2019) claim that attention might not be explanation, a body of work has proved otherwise including (Wiegreffe and Pinter, 2019) in which authors directly target the work in Jain and Wallace (2019) and analyze in detail the problems associated with this study. In addition, Vig et al. (2020) analyze the effect of the attention weights in transformer models for bias analysis in language models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 179, |
|
"text": "(Wiegreffe and Pinter, 2019;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 203, |
|
"text": "Jain and Wallace, 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 204, |
|
"end": 228, |
|
"text": "Serrano and Smith, 2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 391, |
|
"text": "(Hao et al., 2021)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 532, |
|
"end": 560, |
|
"text": "(Wiegreffe and Pinter, 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 629, |
|
"text": "Jain and Wallace (2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 706, |
|
"end": 723, |
|
"text": "Vig et al. (2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this work, we analyzed how attention weights contribute to fairness and accuracy of a predictive model. We proposed an attribution method that leverages the attention mechanism and showed the effectiveness of this approach on both tabular and text data. Using this interpretable attribution framework we then introduced a post-processing bias mitigation strategy based on attention weight manipulation. We validated the proposed framework by conducting experiments with different baselines, fairness metrics, and data modalities.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Although our work can have a positive impact in allowing to reason about fairness and accuracy of models and reduce their bias, it can also have negative societal consequences if used unethically. For instance, it has been previously shown that interpretability frameworks can be used as a means for fairwashing which is when malicious users generate fake explanations for their unfair decisions to justify them (Anders et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 412, |
|
"end": 433, |
|
"text": "(Anders et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Broader Impact", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In addition, previously it has been shown that interpratability frameworks are vulnerable against adversarial attacks (Slack et al., 2020) . We acknowledge that our framework may also be targeted by malicious users for malicious intent that can manipulate attention weights to either generate fake explanations or unfair outcomes. We also acknowledge that our method is not achieving the best accuracy-fairness trade-off on the UCI Adult dataset for the statistical parity notion of fairness and has room for improvement.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 138, |
|
"text": "(Slack et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Broader Impact", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We included additional bias mitigation results using other fairness metrics, such as equality of opportunity and equalized odds on both of the Adult and Heritage Health datasets in this supplementary material. We also included additional postprocessing results along with additional qualitative results both for the tabular and non-tabular dataset experiments. More details can be found under each sub-section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here, we show the results of our mitigation framework considering equality of opportunity and equalized odds notions of fairness. We included baselines that were applicable for these notions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Results on Tabular Data", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Notice not all the baselines we used in our previous analysis for statistical parity were applicable for equality of opportunity and equalized odds notions of fairness; thus, we only included the applicable ones. In addition, LAFTR is only applicable when the sensitive attribute is a binary variable, so it was not applicable to be included in the analysis for the heritage health data where the sensitive attribute is non-binary. Results of these analysis is shown in Figures 8 and 9 . We once again show competitive and comparable results to other baseline methods, while having the advantage of being interpretable and not requiring multiple trainings to satisfy different fairness notions or fairness on different sensitive attributes. Our framework is also flexible for different fairness measures and can be applied to binary or non-binary sensitive features.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 470, |
|
"end": 485, |
|
"text": "Figures 8 and 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.1 Results on Tabular Data", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In addition, we show how different features contribute differently under different fairness notions. Fig. 5 demonstrates the top three features that contribute to unfairness the most along with the percentages of the fairness improvement upon their removal for each of the fairness notions. As observed from the results, while equality of opportunity and equalized odds are similar in terms of their problematic features, statistical parity has different trends. This is also expected as equality of opportunity and equalized odds are similar fairness notions in nature compared to statistical parity.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 107, |
|
"text": "Fig. 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.1 Results on Tabular Data", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also compared our mitigation strategy with the Hardt etl al. post-processing approach (Hardt et al., 2016) . Using this post-processing imple- mentation 4 , we obtained the optimal solution that tries to satisfy different fairness notions subject to accuracy constraints. For our results, we put the results from zeroing out all the attention weights corresponding to the problematic features that were detected from our interpretability framework. However, notice that since our mitigation strategy can control different trade-offs we can have different results depending on the scenario.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 109, |
|
"text": "(Hardt et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Results on Tabular Data", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here, we reported the results from zeroing out the problematic attention weights that is targeting fairness mostly. From the results demonstrated in Tables 3 and 4, we can see comparable numbers to those obtained from (Hardt et al., 2016) . This again shows that our interpretability framework yet again captures the correct responsible features and that the mitigation strategy works as expected.", |
|
"cite_spans": [ |
|
{ |
|
"start": 218, |
|
"end": 238, |
|
"text": "(Hardt et al., 2016)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Results on Tabular Data", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also included some additional qualitative results from the experiments on non-tabular data in Fig. 6 . Fig. 7 shows results on a subset of the features from the UCI Adult and Heritage Health datasets (to keep the plots uncluttered and readable, we incorporated the most interesting features in the plot), and provide some intuition about how different features in these datasets contribute to the model fairness and accuracy. While features such as capital gain and capital loss in the UCI Adult dataset are responsible for improving accuracy and reducing bias, we can observe that features such as relationship or marital status, which can be indirectly correlated with the feature sex, have a negative impact on fairness. For the Heritage Health dataset, including the features drugCount ave and dsfs max provide accuracy gains but at the expense of fairness, while including no Claims and no Spe- cialities negatively impact both accuracy and fairness.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 103, |
|
"text": "Fig. 6", |
|
"ref_id": "FIGREF6" |
|
}, |
|
{ |
|
"start": 106, |
|
"end": 112, |
|
"text": "Fig. 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.2 Results on non-tabular Data", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "More details about each of the datasets along with the descriptions of each feature for the Adult dataset can be found at 5 and for the Heritage Health dataset can be found at 6 . In our qualitative results, we used the feature names as marked in these datasets. If the names or acronyms are unclear kindly reference to the references mentioned for more detailed description for each of the features. Although most of the features in the Adult datasets are self-descriptive, Heritage Health dataset includes some abbreviations that we list in Table 2 for the ease of interpreting each feature's meaning. Figure 7 : Results from the real-world datasets. Note that in our\u0177 z notation we replaced indexes with actual feature names for clarity in these results on real-world datasets as there is not one universal indexing schema, but the feature names are more universal and discriptive for this case. Labels on the points represent the feature name that was removed (zeroed out) according to our\u0177 z notation. The results show how the accuracy and fairness of the model (in terms of statistical parity difference) change by exclusion of each feature. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 543, |
|
"end": 550, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 604, |
|
"end": 612, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.4 Information on Datasets and Features", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We describe and use the definition of these fairness measures as implemented in Fairlearn package(Bird et al., 2020).(a) Classification model. (b) Attribution framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use x \u223c Ber(p) to denote that x is a Bernoulli random variable with P (x = 1) = p.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.kaggle.com/c/hhp", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://fairlearn.org", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://archive.ics.uci.edu/ml/datasets/adult 6 https://www.kaggle.com/c/hhp", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank anonymous reviewers for providing insightful feedback. Ninareh Mehrabi's research was funded by USC + Amazon Center on Secure and Trusted Machine Learning fellowship.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Fairwashing explanations with off-manifold detergent", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Anders", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Plamen", |
|
"middle": [], |
|
"last": "Pasliev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ann-Kathrin", |
|
"middle": [], |
|
"last": "Dombrowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus-Robert", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pan", |
|
"middle": [], |
|
"last": "Kessel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 37th International Conference on Machine Learning", |
|
"volume": "119", |
|
"issue": "", |
|
"pages": "314--323", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Anders, Plamen Pasliev, Ann-Kathrin Dombrowski, Klaus-Robert M\u00fcller, and Pan Kessel. 2020. Fairwashing explanations with off-manifold detergent. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 314-323. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Big data's disparate impact", |
|
"authors": [ |
|
{ |
|
"first": "Solon", |
|
"middle": [], |
|
"last": "Barocas", |
|
"suffix": "" |
|
}, |
|
{

"first": "Andrew",

"middle": [

"D"

],

"last": "Selbst",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Calif. L. Rev", |
|
"volume": "104", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Solon Barocas and Andrew D Selbst. 2016. Big data's disparate impact. Calif. L. Rev., 104:671.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Fairlearn: A toolkit for assessing and improving fairness in ai", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miro", |
|
"middle": [], |
|
"last": "Dud\u00edk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Edgar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [], |
|
"last": "Horn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Lutz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vanessa", |
|
"middle": [], |
|
"last": "Milan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mehrnoosh", |
|
"middle": [], |
|
"last": "Sameki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Bird, Miro Dud\u00edk, Richard Edgar, Brandon Horn, Roman Lutz, Vanessa Milan, Mehrnoosh Sameki, Hanna Wallach, and Kathleen Walker. 2020. Fairlearn: A toolkit for assessing and improv- ing fairness in ai. Technical Report MSR-TR-2020- 32, Microsoft.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Bias in bios: A case study of semantic representation bias in a high-stakes setting", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "De-Arteaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexey", |
|
"middle": [], |
|
"last": "Romanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Wallach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Chayes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Borgs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Chouldechova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sahin", |
|
"middle": [], |
|
"last": "Geyik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krishnaram", |
|
"middle": [], |
|
"last": "Kenthapadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam Tauman", |
|
"middle": [], |
|
"last": "Kalai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "proceedings of the Conference on Fairness, Accountability, and Transparency", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "120--128", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria De-Arteaga, Alexey Romanov, Hanna Wal- lach, Jennifer Chayes, Christian Borgs, Alexandra Chouldechova, Sahin Geyik, Krishnaram Kentha- padi, and Adam Tauman Kalai. 2019. Bias in bios: A case study of semantic representation bias in a high-stakes setting. In proceedings of the Confer- ence on Fairness, Accountability, and Transparency, pages 120-128.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "UCI machine learning repository", |
|
"authors": [ |
|
{ |
|
"first": "Dheeru", |
|
"middle": [], |
|
"last": "Dua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Casey", |
|
"middle": [], |
|
"last": "Graff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dheeru Dua and Casey Graff. 2017. UCI machine learning repository.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Fairness through awareness", |
|
"authors": [ |
|
{ |
|
"first": "Cynthia", |
|
"middle": [], |
|
"last": "Dwork", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Moritz", |
|
"middle": [], |
|
"last": "Hardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toniann", |
|
"middle": [], |
|
"last": "Pitassi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Reingold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 3rd Innovations in Theoretical Computer Science Conference, ITCS '12", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "214--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cynthia Dwork, Moritz Hardt, Toniann Pitassi, Omer Reingold, and Richard Zemel. 2012. Fairness through awareness. In Proceedings of the 3rd In- novations in Theoretical Computer Science Confer- ence, ITCS '12, pages 214-226, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Controllable guarantees for fair outcomes via contrastive information estimation", |
|
"authors": [ |
|
{ |
|
"first": "Umang", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{

"first": "Aaron",

"middle": [

"M"

],

"last": "Ferber",

"suffix": ""

},

{

"first": "Bistra",

"middle": [],

"last": "Dilkina",

"suffix": ""

},

{

"first": "Greg",

"middle": [

"Ver"

],

"last": "Steeg",

"suffix": ""

}
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "35", |
|
"issue": "", |
|
"pages": "7610--7619", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Umang Gupta, Aaron M Ferber, Bistra Dilkina, and Greg Ver Steeg. 2021. Controllable guarantees for fair outcomes via contrastive information estima- tion. In Proceedings of the AAAI Conference on Ar- tificial Intelligence, volume 35, pages 7610-7619.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Explainable ai under contract and tort law: legal incentives and technical challenges", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Hacker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Grundmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Artificial Intelligence and Law", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Hacker, Ralf Krestel, Stefan Grundmann, and Felix Naumann. 2020. Explainable ai under contract and tort law: legal incentives and technical chal- lenges. Artificial Intelligence and Law, pages 1-25.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A methodology for direct and indirect discrimination prevention in data mining", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hajian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Domingo-Ferrer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "IEEE Transactions on Knowledge and Data Engineering", |
|
"volume": "25", |
|
"issue": "7", |
|
"pages": "1445--1459", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Hajian and J. Domingo-Ferrer. 2013. A methodol- ogy for direct and indirect discrimination prevention in data mining. IEEE Transactions on Knowledge and Data Engineering, 25(7):1445-1459.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Self-attention attribution: Interpreting information interactions inside transformer", |
|
"authors": [ |
|
{ |
|
"first": "Yaru", |
|
"middle": [], |
|
"last": "Hao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "35", |
|
"issue": "", |
|
"pages": "12963--12971", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yaru Hao, Li Dong, Furu Wei, and Ke Xu. 2021. Self-attention attribution: Interpreting information interactions inside transformer. In Proceedings of the AAAI Conference on Artificial Intelligence, vol- ume 35, pages 12963-12971.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Equality of opportunity in supervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Moritz", |
|
"middle": [], |
|
"last": "Hardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Price", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Price", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nati", |
|
"middle": [], |
|
"last": "Srebro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moritz Hardt, Eric Price, Eric Price, and Nati Srebro. 2016. Equality of opportunity in supervised learn- ing. In Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Attention is not Explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarthak", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3543--3556", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not Explanation. In Proceedings of the 2019 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 3543-3556, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Greg Ver Steeg, Wael AbdAlmageed, and Premkumar Natarajan. 2020. Invariant representations through adversarial forgetting", |
|
"authors": [ |
|
{ |
|
"first": "Ayush", |
|
"middle": [], |
|
"last": "Jaiswal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Moyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "4272--4279", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ayush Jaiswal, Daniel Moyer, Greg Ver Steeg, Wael AbdAlmageed, and Premkumar Natarajan. 2020. Invariant representations through adversarial forget- ting. Proceedings of the AAAI Conference on Artifi- cial Intelligence, 34(04):4272-4279.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Fairness-aware classifier with prejudice remover regularizer", |
|
"authors": [ |
|
{ |
|
"first": "Toshihiro", |
|
"middle": [], |
|
"last": "Kamishima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shotaro", |
|
"middle": [], |
|
"last": "Akaho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideki", |
|
"middle": [], |
|
"last": "Asoh", |
|
"suffix": "" |
|
},

{

"first": "Jun",

"middle": [],

"last": "Sakuma",

"suffix": ""

}

],
|
"year": 2012, |
|
"venue": "Joint European Conference on Machine Learning and Knowledge Discovery in Databases", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--50", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Toshihiro Kamishima, Shotaro Akaho, Hideki Asoh, and Jun Sakuma. 2012. Fairness-aware classi- fier with prejudice remover regularizer. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pages 35-50. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Learning adversarially fair and transferable representations", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Madras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elliot", |
|
"middle": [], |
|
"last": "Creager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toniann", |
|
"middle": [], |
|
"last": "Pitassi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 35th International Conference on Machine Learning", |
|
"volume": "80", |
|
"issue": "", |
|
"pages": "3384--3393", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Madras, Elliot Creager, Toniann Pitassi, and Richard Zemel. 2018. Learning adversarially fair and transferable representations. In Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pages 3384-3393. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Yuzhong Huang, and Fred Morstatter. 2020. Statistical equity: A fairness classification objective", |
|
"authors": [ |
|
{ |
|
"first": "Ninareh", |
|
"middle": [], |
|
"last": "Mehrabi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.07293" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ninareh Mehrabi, Yuzhong Huang, and Fred Morstat- ter. 2020. Statistical equity: A fairness classification objective. arXiv preprint arXiv:2005.07293.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A survey on bias and fairness in machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Ninareh", |
|
"middle": [], |
|
"last": "Mehrabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fred", |
|
"middle": [], |
|
"last": "Morstatter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nripsuta", |
|
"middle": [], |
|
"last": "Saxena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Lerman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aram", |
|
"middle": [], |
|
"last": "Galstyan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ACM Comput. Surv", |
|
"volume": "", |
|
"issue": "6", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ninareh Mehrabi, Fred Morstatter, Nripsuta Saxena, Kristina Lerman, and Aram Galstyan. 2021. A sur- vey on bias and fairness in machine learning. ACM Comput. Surv., 54(6).", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Invariant representations without adversarial training", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Moyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuyang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Brekelmans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aram", |
|
"middle": [], |
|
"last": "Galstyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"Ver" |
|
], |
|
"last": "Steeg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "31", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Moyer, Shuyang Gao, Rob Brekelmans, Aram Galstyan, and Greg Ver Steeg. 2018. Invariant rep- resentations without adversarial training. In Ad- vances in Neural Information Processing Systems, volume 31. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Mitigating information leakage in image representations: A maximum entropy approach", |
|
"authors": [ |
|
{

"first": "Proteek",

"middle": [

"Chandan"

],

"last": "Roy",

"suffix": ""

},

{

"first": "Vishnu",

"middle": [

"Naresh"

],

"last": "Boddeti",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2586--2594", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Proteek Chandan Roy and Vishnu Naresh Boddeti. 2019. Mitigating information leakage in image rep- resentations: A maximum entropy approach. In Proceedings of the IEEE/CVF Conference on Com- puter Vision and Pattern Recognition, pages 2586- 2594.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Is attention interpretable?", |
|
"authors": [ |
|
{ |
|
"first": "Sofia", |
|
"middle": [], |
|
"last": "Serrano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2931--2951", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sofia Serrano and Noah A. Smith. 2019. Is attention interpretable? In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2931-2951, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Fooling lime and shap: Adversarial attacks on post hoc explanation methods", |
|
"authors": [ |
|
{ |
|
"first": "Dylan", |
|
"middle": [], |
|
"last": "Slack", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophie", |
|
"middle": [], |
|
"last": "Hilgard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Himabindu", |
|
"middle": [], |
|
"last": "Lakkaraju", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "180--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dylan Slack, Sophie Hilgard, Emily Jia, Sameer Singh, and Himabindu Lakkaraju. 2020. Fooling lime and shap: Adversarial attacks on post hoc explanation methods. In Proceedings of the AAAI/ACM Confer- ence on AI, Ethics, and Society, pages 180-186.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Learning controllable fair representations", |
|
"authors": [ |
|
{ |
|
"first": "Jiaming", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pratyusha", |
|
"middle": [], |
|
"last": "Kalluri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Grover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shengjia", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Ermon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of Machine Learning Research", |
|
"volume": "89", |
|
"issue": "", |
|
"pages": "2164--2173", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiaming Song, Pratyusha Kalluri, Aditya Grover, Shengjia Zhao, and Stefano Ermon. 2019. Learn- ing controllable fair representations. In Proceed- ings of Machine Learning Research, volume 89 of Proceedings of Machine Learning Research, pages 2164-2173. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{

"first": "\u0141ukasz",

"middle": [],

"last": "Kaiser",

"suffix": ""

},

{

"first": "Illia",

"middle": [],

"last": "Polosukhin",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, volume 30. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Investigating gender bias in language models using causal mediation analysis", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Vig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Gehrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Belinkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharon", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Nevo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaron", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "Shieber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "12388--12401", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Vig, Sebastian Gehrmann, Yonatan Belinkov, Sharon Qian, Daniel Nevo, Yaron Singer, and Stuart Shieber. 2020. Investigating gender bias in language models using causal mediation analysis. In Ad- vances in Neural Information Processing Systems, volume 33, pages 12388-12401. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Attention is not not explanation", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Wiegreffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuval", |
|
"middle": [], |
|
"last": "Pinter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not explanation. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Process- ing (EMNLP-IJCNLP), pages 11-20, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Fairness Constraints: Mechanisms for Fair Classification", |
|
"authors": [ |
|
{ |
|
"first": "Muhammad", |
|
"middle": [], |
|
"last": "Bilal Zafar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabel", |
|
"middle": [], |
|
"last": "Valera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manuel", |
|
"middle": [], |
|
"last": "Gomez Rogriguez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krishna", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Gummadi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 20th International Conference on Artificial Intelligence and Statistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "962--970", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Muhammad Bilal Zafar, Isabel Valera, Manuel Gomez Rogriguez, and Krishna P. Gummadi. 2017. Fair- ness Constraints: Mechanisms for Fair Classifica- tion. In Proceedings of the 20th International Con- ference on Artificial Intelligence and Statistics, vol- ume 54 of Proceedings of Machine Learning Re- search, pages 962-970, Fort Lauderdale, FL, USA. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A causal framework for discovering and removing direct and indirect discrimination", |
|
"authors": [ |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongkai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xintao", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence, IJCAI-17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3929--3935", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lu Zhang, Yongkai Wu, and Xintao Wu. 2017. A causal framework for discovering and removing di- rect and indirect discrimination. In Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence, IJCAI-17, pages 3929-3935.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Attentionbased bidirectional long short-term memory networks for relation classification", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenyu", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bingchen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongwei", |
|
"middle": [], |
|
"last": "Hao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "207--212", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Zhou, Wei Shi, Jun Tian, Zhenyu Qi, Bingchen Li, Hongwei Hao, and Bo Xu. 2016. Attention- based bidirectional long short-term memory net- works for relation classification. In Proceedings of the 54th Annual Meeting of the Association for Com- putational Linguistics (Volume 2: Short Papers), pages 207-212, Berlin, Germany. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A survey on measuring indirect discrimination in machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Indre", |
|
"middle": [], |
|
"last": "Zliobaite", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1511.00148" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Indre Zliobaite. 2015. A survey on measuring indirect discrimination in machine learning. arXiv preprint arXiv:1511.00148.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Results from the synthetic datasets. Following the\u0177 o and\u0177 z notations,\u0177 o represents the original model outcome with all the attention weights intact, while\u0177 k z represents the outcome of the model in which the attention weights corresponding to k th feature are zeroed out (e.g.\u0177 1 z represents when attention weights of feature f 1 are zeroed out). The results show how the accuracy and fairness (SPD) of the model change by exclusion of each feature.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Accuracy vs parity curves for UCI Adult and Heritage Health datasets.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Qualitative results from the non-tabular data experiment on the job classification task based on bio texts. Green regions are the top three words used by the model for its prediction based on the attention weights. While the Not Debiased Model mostly focuses on gendered words, our method focused on profession-based words, such as R.N. (Registered Nurse), to correctly predict \"nurse.\"", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF5": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Top three features for each fairness definition removing which caused the most benefit in improving the corresponding fairness definition. The percentage of improvement upon removal is marked on the y-axis for adult and heritage health datasets.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF6": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Additional qualitative results from the non-tabular data experiment on the job classification task based on the bio texts. Green regions represent top three words that the model used for its prediction based on the attention weights.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF7": { |
|
"num": null, |
|
"uris": null, |
|
"text": ") 0.77 (0.006) 0.012 (0.003) 0.81 (0.013) 0.020 (0.019) 0.81 (0.021) 0.027 (0.023) Hardt et al. 0.77 (0.012) 0.013 (0.005) 0.83 (0.005) 0.064 (0.016) 0.81 (0.007) 0.047 (0.014) Table 3: Adult results on post-processing approach from Hardt et al. vs our attention method when all problematic features are zeroed out. ) 0.68 (0.004) 0.04 (0.015) 0.68 (0.015) 0.15 (0.085) 0.68 (0.015) 0.10 (0.085) Hardt et al. 0.68 (0.005) 0.05 (0.018) 0.75 (0.001) 0.20 (0.033) 0.69 (0.012) 0.19 (0.031)Table 4: Heritage Health results on post-processing approach from Hardt et al. vs our attention method when all problematic features are zeroed out. 0.79 0.80 0.81 0.82 0.83 0.84 0Accuracy vs equality of opportunity curves for UCI Adult and Heritage Health datasets. Accuracy vs equalized odds curves for UCI Adult and Heritage Health datasets.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Experiment 2: Bias Mitigation via Attention Weight Manipulation In this experiment, we seek to validate the proposed post-processing bias mitigation framework and compare it with various recent mitigation approaches. The results for realworld tabular data are presented in Sec. 4.2." |
|
}, |
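A minimal sketch of the selection step behind the post-processing strategy named above, under assumed inputs: after running the attribution loop on validation data, features whose exclusion lowers SPD at an acceptable accuracy cost are chosen, and their attention weights are zeroed at test time. The `results_when_zeroed` dictionary, the baseline numbers, and the `max_acc_drop` threshold are hypothetical placeholders, not values reported in the paper.

```python
# Sketch of selecting which features' attention weights to zero at test time.
# Each entry: feature -> (validation accuracy, validation SPD) when that feature's
# attention weights are zeroed out; all numbers are illustrative dummies.
results_when_zeroed = {
    "marital_status": (0.83, 0.06),
    "capital_gain":   (0.79, 0.17),
    "education_num":  (0.77, 0.18),
}
baseline_acc, baseline_spd = 0.84, 0.18   # model with all attention weights intact

max_acc_drop = 0.02   # tolerate at most 2 points of accuracy loss per feature
features_to_zero = [
    f for f, (acc, spd) in results_when_zeroed.items()
    if spd < baseline_spd and baseline_acc - acc <= max_acc_drop
]
print("zero attention over:", features_to_zero)   # -> ['marital_status']
```

The chosen features would then be passed to the same attention-zeroing prediction routine used for attribution, now applied to test inputs.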
|
"TABREF3": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Some abbreviations used in Heritage Health dataset's feature names. These abbreviations are listed for clarity of interpreting each feature's meaning specifically in our qualitative analysis or attribution visualizations." |
|
} |
|
} |
|
} |
|
} |