|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:48:39.922365Z" |
|
}, |
|
"title": "Demoting Racial Bias in Hate Speech Detection", |
|
"authors": [ |
|
{ |
|
"first": "Mengzhou", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Language Technologies Institute Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Anjalie", |
|
"middle": [], |
|
"last": "Field", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Language Technologies Institute Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Language Technologies Institute Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In current hate speech datasets, there exists a high correlation between annotators' perceptions of toxicity and signals of African American English (AAE). This bias in annotated training data and the tendency of machine learning models to amplify it cause AAE text to often be mislabeled as abusive/offensive/hate speech with a high false positive rate by current hate speech classifiers. In this paper, we use adversarial training to mitigate this bias, introducing a hate speech classifier that learns to detect toxic sentences while demoting confounds corresponding to AAE texts. Experimental results on a hate speech dataset and an AAE dataset suggest that our method is able to substantially reduce the false positive rate for AAE text while only minimally affecting the performance of hate speech classification.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In current hate speech datasets, there exists a high correlation between annotators' perceptions of toxicity and signals of African American English (AAE). This bias in annotated training data and the tendency of machine learning models to amplify it cause AAE text to often be mislabeled as abusive/offensive/hate speech with a high false positive rate by current hate speech classifiers. In this paper, we use adversarial training to mitigate this bias, introducing a hate speech classifier that learns to detect toxic sentences while demoting confounds corresponding to AAE texts. Experimental results on a hate speech dataset and an AAE dataset suggest that our method is able to substantially reduce the false positive rate for AAE text while only minimally affecting the performance of hate speech classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The prevalence of toxic comments on social media and the mental toll on human moderators has generated much interest in automated systems for detecting hate speech and abusive language (Schmidt and Wiegand, 2017; Fortuna and Nunes, 2018) , especially language that targets particular social groups (Silva et al., 2016; Mondal et al., 2017; Mathew et al., 2019) . However, deploying these systems without careful consideration of social context can increase bias, marginalization, and exclusion (Bender and Friedman, 2018; Waseem and Hovy, 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 212, |
|
"text": "(Schmidt and Wiegand, 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 237, |
|
"text": "Fortuna and Nunes, 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 318, |
|
"text": "(Silva et al., 2016;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 319, |
|
"end": 339, |
|
"text": "Mondal et al., 2017;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 360, |
|
"text": "Mathew et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 521, |
|
"text": "(Bender and Friedman, 2018;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 544, |
|
"text": "Waseem and Hovy, 2016)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most datasets currently used to train hate speech classifiers were collected through crowdsourced annotations (Davidson et al., 2017; Founta et al., 2018) , despite the risk of annotator bias. Waseem (2016) show that non-experts are more likely to label text as abusive than expert annotators, and Sap et al. (2019) show how lack of social context in annotation tasks further increases the risk of annotator bias, which can in turn lead to the marginalization of racial minorities. More specifically, annotators are more likely to label comments as abusive if they are written in African American English (AAE). These comments are assumed to be incorrectly labelled, as annotators do not mark them as abusive if they are properly primed with dialect and race information (Sap et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 133, |
|
"text": "(Davidson et al., 2017;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 154, |
|
"text": "Founta et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 206, |
|
"text": "Waseem (2016)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 315, |
|
"text": "Sap et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 771, |
|
"end": 789, |
|
"text": "(Sap et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "These biases in annotations are absorbed and amplified by automated classifiers. Classifiers trained on biased annotations are more likely to incorrectly label AAE text as abusive than non-AAE text: the false positive rate (FPR) is higher for AAE text, which risks further suppressing an already marginalized community. More formally, the disparity in FPR between groups is a violation of the Equality of Opportunity criterion, a commonly used metric of algorithmic fairness whose violation indicates discrimination (Hardt et al., 2016) . According to Sap et al. (2019) , the false positive rate for hate speech/abusive language of the AAE dialect can reach as high as 46%.", |
|
"cite_spans": [ |
|
{ |
|
"start": 516, |
|
"end": 536, |
|
"text": "(Hardt et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 552, |
|
"end": 569, |
|
"text": "Sap et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
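The Equality of Opportunity criterion referenced above can be checked directly from model predictions. The sketch below is not part of the paper's code: it computes per-group false positive rates and their gap, and the variable names (y_true, y_pred, group) are illustrative assumptions.

```python
# Minimal sketch (not the paper's code): per-group false positive rate and the
# Equality of Opportunity gap between AAE and non-AAE text.
from typing import Sequence


def false_positive_rate(y_true: Sequence[int], y_pred: Sequence[int]) -> float:
    """Fraction of truly non-toxic examples (label 0) predicted as toxic (1)."""
    negatives = [(t, p) for t, p in zip(y_true, y_pred) if t == 0]
    if not negatives:
        return 0.0
    return sum(p for _, p in negatives) / len(negatives)


def fpr_gap(y_true, y_pred, group):
    """FPR(AAE) - FPR(non-AAE); a large positive gap indicates a violation of
    Equality of Opportunity against AAE text."""
    def subset_fpr(keep):
        pairs = [(t, p) for t, p, g in zip(y_true, y_pred, group) if keep(g)]
        return false_positive_rate(*zip(*pairs)) if pairs else 0.0
    return subset_fpr(lambda g: g == "AAE") - subset_fpr(lambda g: g != "AAE")
```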
|
{ |
|
"text": "Thus, Sap et al. (2019) reveal two related issues in the task of hate speech classification: the first is biases in existing annotations, and the second is model tendencies to absorb and even amplify biases from spurious correlations present in datasets (Zhao et al., 2017; Lloyd, 2018) . While current datasets can be re-annotated, this process is timeconsuming and expensive. Furthermore, even with perfect annotations, current hate speech detection models may still learn and amplify spurious correlations between AAE and abusive language (Zhao et al., 2017; Lloyd, 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 23, |
|
"text": "Sap et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 273, |
|
"text": "(Zhao et al., 2017;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 286, |
|
"text": "Lloyd, 2018)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 561, |
|
"text": "(Zhao et al., 2017;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 574, |
|
"text": "Lloyd, 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we present an adversarial approach to mitigating the risk of racial bias in hate speech classifiers, even when there might be annotation bias in the underlying training data. In \u00a72, we describe our methodology in general terms, as it can be useful in any text classification task that seeks to predict a target attribute (here, toxicity) without basing predictions on a protected attribute (here, AAE). Although we aim at preserving the utility of classification models, our primary goal is not to improve the raw performance over predicting the target attribute (hate speech detection), but rather to reduce the influence of the protected attribute.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In \u00a73 and \u00a74, we evaluate how well our approach reduces the risk of racial bias in hate speech classification by measuring the FPR of AAE text, i.e., how often the model incorrectly labels AAE text as abusive. We evaluate our methodology using two types of data: (1) a dataset inferred to be AAE using demographic information (Blodgett et al., 2016) , and (2) datasets annotated for hate speech (Davidson et al., 2017; Founta et al., 2018) where we automatically infer AAE dialect and then demote indicators of AAE in corresponding hate speech classifiers. Overall, our approach decreases the dialectal information encoded by the hate speech model, leading to a 2.2-3.2 percent reduction in FPR for AAE text, without sacrificing the utility of hate speech classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 302, |
|
"end": 349, |
|
"text": "demographic information (Blodgett et al., 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 395, |
|
"end": 418, |
|
"text": "(Davidson et al., 2017;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 419, |
|
"end": 439, |
|
"text": "Founta et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our goal is to train a model that can predict a target attribute (abusive or not abusive language), but that does not base decisions off of confounds in data that result from protected attributes (e.g., AAE dialect). In order to achieve this, we use an adversarial objective, which discourages the model from encoding information about the protected attribute. Adversarial training is widely known for successfully adapting models to learn representations that are invariant to undesired attributes, such as demographics and topics, though they rarely disentangle attributes completely Elazar and Goldberg, 2018; Kumar et al., 2019; Lample et al., 2019; Landeiro et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 586, |
|
"end": 612, |
|
"text": "Elazar and Goldberg, 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 632, |
|
"text": "Kumar et al., 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 633, |
|
"end": 653, |
|
"text": "Lample et al., 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 654, |
|
"end": 676, |
|
"text": "Landeiro et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Model Architecture Our demotion model consists of three parts: 1) An encoder H that encodes the text into a high dimensional space; 2) A binary classifier C that predicts the target attribute from the input text; 3) An adversary D that predicts the protected attribute from the input text. We used a single-layer bidirectional LSTM encoder with an attention mechanism. Both classifiers are two-layer MLPs with a tanh activation function.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
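A minimal PyTorch sketch of the three components just described: an attention-pooled single-layer BiLSTM encoder H, and a two-layer tanh MLP reused for both the classifier C and the adversary D. Dimensions and module names are illustrative assumptions, not values from the paper.

```python
# Minimal sketch of the architecture described above. Dimensions are assumed.
import torch
import torch.nn as nn


class Encoder(nn.Module):  # H: attention-pooled single-layer BiLSTM
    def __init__(self, vocab_size, emb_dim=300, hidden_dim=256):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hidden_dim, batch_first=True,
                            bidirectional=True)
        self.attn = nn.Linear(2 * hidden_dim, 1)

    def forward(self, tokens):                      # tokens: (batch, seq_len)
        states, _ = self.lstm(self.embed(tokens))   # (batch, seq_len, 2*hidden)
        weights = torch.softmax(self.attn(states), dim=1)
        return (weights * states).sum(dim=1)        # attention-pooled vector


class MLP(nn.Module):  # used for both the classifier C and the adversary D
    def __init__(self, in_dim, hidden_dim, n_classes):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, n_classes),
        )

    def forward(self, h):
        return self.net(h)
```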
|
{ |
|
"text": "Training Procedure Each data point in our training set is a triplet", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "{(x i , y i , z i ); i \u2208 1 . . . N }, where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "x i is the input text, y i is the label for the target attribute and z i is label of the protected attribute. The (x i , y i ) tuples are used to train the classifier C, and the (x i , z i ) tuple is used to train the adversary D.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
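As a small illustration of the training triplets just described (not from the paper's code), one example can be represented as follows.

```python
# One training example: input text x, target label y, protected label z.
from dataclasses import dataclass


@dataclass
class Example:
    x: str   # input text
    y: int   # target attribute, e.g. 1 = toxic, 0 = non-toxic
    z: int   # protected attribute, e.g. 1 = AAE, 0 = non-AAE
```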
|
{ |
|
"text": "We adapt a two-phase training procedure from Kumar et al. (2019) . We use this procedure because Kumar et al. (2019) show that their model is more effective than alternatives in a setting similar to ours, where the lexical indicators of the target and protected attributes are closely connected (e.g., words that are common in non-abusive AAE and are also common in abusive language datasets). In the first phase (pre-training), we use the standard supervised training objective to update encoder H and classifier C:", |
|
"cite_spans": [ |
|
{ |
|
"start": 45, |
|
"end": 64, |
|
"text": "Kumar et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 116, |
|
"text": "Kumar et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "min C,H N i=1 L(C(H(x i )), y i )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
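A sketch of the pre-training phase in Equation 1, assuming the hypothetical Encoder and MLP modules sketched above and a data loader that yields (tokens, y, z) batches; the optimizer choice and learning rate are assumptions, not from the paper.

```python
# Sketch of phase 1 (Equation 1): standard supervised training of H and C.
# `train_loader`, the optimizer, and the learning rate are assumptions.
import torch
import torch.nn as nn


def pretrain(encoder, classifier, train_loader, epochs=5, lr=1e-3):
    params = list(encoder.parameters()) + list(classifier.parameters())
    optimizer = torch.optim.Adam(params, lr=lr)
    criterion = nn.CrossEntropyLoss()
    for _ in range(epochs):
        for tokens, y, _z in train_loader:  # the protected label z is unused here
            loss = criterion(classifier(encoder(tokens)), y)  # L(C(H(x_i)), y_i)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
```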
|
{ |
|
"text": "After pre-training, the encoder should encode all relevant information that is useful for predicting the target attribute, including information predictive of the protected attribute.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In the second phase, starting from the bestperforming checkpoint in the pre-training phase, we alternate training the adversary D with Equation 2 and the other two models (H and C) with Equation 3:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "min D 1 N N i=1 L(D(H(x i )), z i ) (2) min H,C 1 N N i=1 \u03b1 \u2022 L(C(H(x i )), y i )+ (1 \u2212 \u03b1) \u2022 L(D(H(x i )), 0.5)", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Unlike Kumar et al. 2019, we introduce a hyper-parameter \u03b1, which controls the balance between the two loss terms in Equation 3. We find that \u03b1 is crucial for correctly training the model (we detail this in \u00a73).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We first train the adversary to predict the protected attribute from the text representations outputted by the encoder. We then train the encoder to \"fool\" the adversary by generating representations that will cause the adversary to output random guesses, rather than accurate predictions. At the same time, we train the classifier to predict the target attribute from the encoder output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
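Putting the two alternating updates together, the sketch below implements Equation 2 (training the adversary on the protected label) and Equation 3 (training H and C with the α-weighted objective whose second term pushes the adversary's prediction toward 0.5). It assumes a binary adversary with a single output logit and separate optimizers opt_d (adversary only) and opt_hc (encoder and classifier only); α = 0.05 follows the value reported in Section 3.2, and everything else is an assumption rather than the paper's code.

```python
# Sketch of phase 2 (Equations 2 and 3). opt_d updates only the adversary D;
# opt_hc updates only the encoder H and classifier C, so each step leaves the
# other side's parameters untouched. alpha = 0.05 follows Section 3.2.
import torch
import torch.nn as nn


def train_adversary_epoch(encoder, adversary, loader, opt_d):
    bce = nn.BCEWithLogitsLoss()
    for tokens, _y, z in loader:
        with torch.no_grad():                 # freeze H while training D
            h = encoder(tokens)
        loss = bce(adversary(h).squeeze(-1), z.float())       # Eq. 2
        opt_d.zero_grad()
        loss.backward()
        opt_d.step()


def train_classifier_epoch(encoder, classifier, adversary, loader, opt_hc,
                           alpha=0.05):
    ce, bce = nn.CrossEntropyLoss(), nn.BCEWithLogitsLoss()
    for tokens, y, _z in loader:
        h = encoder(tokens)
        task_loss = ce(classifier(h), y)
        fool_target = torch.full((tokens.size(0),), 0.5)       # D(H(x)) -> 0.5
        fool_loss = bce(adversary(h).squeeze(-1), fool_target)
        loss = alpha * task_loss + (1 - alpha) * fool_loss     # Eq. 3
        opt_hc.zero_grad()
        loss.backward()
        opt_hc.step()
```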
|
{ |
|
"text": "Founta et al. 2018I am hungry and I am dirty as hell bruh, need dat shower and dem calories Blodgett et al. (2016) so much energy and time wasted hatin on someone when alla that coulda been put towards makin yourself better.... a. . . https://t.co/awCg1nCt8t 2016where the state-of-the-art model misclassifies innocuous tweets (inferred to be AAE) as abusive language. Our model correctly classifies these tweets as nontoxic.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Example", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To the best of our knowledge, there are no datasets that are annotated both for toxicity and for AAE dialect. Instead, we use two toxicity datasets and one English dialect dataset that are all from the same domain (Twitter):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments 3.1 Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "DWMW17 (Davidson et al., 2017) A Twitter dataset that contains 25K tweets annotated as hate speech, offensive, or none. The authors define hate speech as language that is used to expresses hatred towards a targeted group or is intended to be derogatory, to humiliate, or to insult the members of the group, and offensive language as language that contains offensive terms which are not necessarily inappropriate.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 30, |
|
"text": "(Davidson et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments 3.1 Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "FDCL18 (Founta et al., 2018) A Twitter dataset that contains 100K tweets annotated as hateful, abusive, spam or none. This labeling scheme was determined by conducting multiple rounds of crowdsourcing to understand how crowdworkers use different labels. Strongly impolite, rude, or hurtful language is considered abusive, and the definition of hate speech is the same as in DWMW17.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments 3.1 Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "BROD16 (Blodgett et al., 2016) A 20K sample out of a 1.15M English tweet corpus that is demographically associated with African American twitter users. Further analysis shows that the dataset contains significant linguistic features of African American English. In order to obtain dialect labels for the DWMW17 and FDCL18, we use an off-the-shelf demographically-aligned ensemble model (Blodgett et al., 2016) which learns a posterior topic distribution (topics corresponding to African American, Hispanic, White and Other) at a user, message, and word level. Blodgett et al. (2016) generate a AAE-aligned corpus comprising tweets from users labelled with at least 80% posterior probability as using AAE-associated terms. Similarly, following Sap et al. 2019, we assign AAE label to tweets with at least 80% posterior probability of containing AAE-associated terms at the message level and consider all other tweets as Non-AAE.", |
|
"cite_spans": [ |
|
{ |
|
"start": 386, |
|
"end": 409, |
|
"text": "(Blodgett et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments 3.1 Dataset", |
|
"sec_num": "3" |
|
}, |
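The message-level thresholding just described can be sketched as below. Here predict_dialect_posteriors is a hypothetical wrapper around the Blodgett et al. (2016) model that returns topic posteriors per tweet; it is not an actual API of that release.

```python
# Illustrative sketch of message-level dialect labeling with an 80% threshold.
# `predict_dialect_posteriors` is a hypothetical wrapper assumed to return a
# dict of topic posteriors per tweet.
AAE_THRESHOLD = 0.80


def label_dialect(tweets, predict_dialect_posteriors):
    labels = []
    for tweet in tweets:
        posteriors = predict_dialect_posteriors(tweet)  # e.g. {"AAE": 0.86, ...}
        is_aae = posteriors.get("AAE", 0.0) >= AAE_THRESHOLD
        labels.append("AAE" if is_aae else "Non-AAE")
    return labels
```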
|
{ |
|
"text": "In order to obtain toxicity labels for the BROD16 dataset, we consider all tweets in this dataset to be non-toxic. This is a reasonable assumption since hate speech is relatively rare compared to the large amount of non-abusive language on social media (Founta et al., 2018). 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments 3.1 Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In the pre-training phase, we train the model until convergence and pick the best-performing checkpoint for fine-tuning. In the fine-tuning phase, we alternate training one single adversary and the classification model each for two epochs in one round and train for 10 rounds in total.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Parameters", |
|
"sec_num": "3.2" |
|
}, |
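The alternating schedule described above can be written as a small driver over the hypothetical train_adversary_epoch and train_classifier_epoch helpers sketched in Section 2: 10 rounds, two epochs per side. All names are assumptions and the snippet is purely illustrative.

```python
# Sketch of the fine-tuning schedule: 10 rounds, alternating two adversary
# epochs with two classifier epochs, reusing the hypothetical helpers from the
# Section 2 sketch.
def finetune(encoder, classifier, adversary, loader, opt_hc, opt_d,
             rounds=10, epochs_per_side=2, alpha=0.05):
    for _ in range(rounds):
        for _ in range(epochs_per_side):
            train_adversary_epoch(encoder, adversary, loader, opt_d)
        for _ in range(epochs_per_side):
            train_classifier_epoch(encoder, classifier, adversary,
                                   loader, opt_hc, alpha)
```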
|
{ |
|
"text": "We additionally tuned the \u03b1 parameter used to weight the loss terms in Equation 3 over validation sets. We found that the value of \u03b1 is important for obtaining text representations containing less dialectal information. A large \u03b1 easily leads to over-fitting and a drastic drop in validation accuracy for hate speech classification. However, a near zero \u03b1 severely reduces both training and validation accuracy. We ultimately set \u03b1 = 0.05.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Parameters", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We use the same architecture as Sap et al. (2019) as a baseline model, which does not contain an adversarial objective. For both of this baseline model and our model, because of the goal of demoting the influence of AAE markers, we select the model with the lowest false positive rate on validation set. We train models on both DWMW17 and FDCL18 datasets, which we split into train/dev/test subsets following Sap et al. (2019) . Table 3 : False positive rates (FPR), indicating how often AAE text is incorrectly classified as hateful or abusive, when training with the FDCL18 dataset. Our model consistently improves FPR for offensiveness, and performs slightly better than the baseline for hate speech detection. Table 2 reports accuracy and F1 scores over the hate speech classification task. Despite the adversarial component in our model, which makes this task more difficult, our model achieves comparable accuracy as the baseline and even improves F1 score. Furthermore, the results of our baseline model are on par with those reported in Sap et al. (2019) , which verifies the validity of our implementation. Next, we assess how well our demotion model reduces the false positive rate in AAE text in two ways: (1) we use our trained hate speech detection model to classify text inferred as AAE in BROD16 dataset, in which we assume there is no hateful or offensive speech and (2) we use our trained hate speech detection model to classify the test partitions of the DWMW17 and FDCL18 datasets, which are annotated for hateful and offensive speech and for which we use an off-the-shelf model to infer dialect, as described in \u00a73. Thus, for both evaluation criteria, we have or infer AAE labels and toxicity labels, and we can compute how often text inferred as AAE is misclassified as hateful, abusive, or offensive.", |
|
"cite_spans": [ |
|
{ |
|
"start": 409, |
|
"end": 426, |
|
"text": "Sap et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1045, |
|
"end": 1062, |
|
"text": "Sap et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 429, |
|
"end": 436, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 714, |
|
"end": 721, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training Parameters", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Notably, Sap et al. (2019) show that datasets that annotate text for hate speech without sufficient context-like DWMW17 and FDCL18-may suffer from inaccurate annotations, in that annotators Sap et al. (2019) suggest that annotators over-estimate the toxicity in AAE text, meaning FPRs over the DWMW17 and FDCL18 test sets are actually lower-bounds, and the true FPR is could be even higher. Furthermore, if we assume that the DWMW17 and FDCL18 training sets contain biased annotations, as suggested by Sap et al. (2019) , then a high FPR over the corresponding test sets suggests that the classification model amplifies bias in the training data, and labels non-toxic AAE text as toxic even when annotators did not. Table 3 reports results for both evaluation criteria when we train the model on the FDCL18 data. In both cases, our model successfully reduces FPR. For abusive language detection in the FDCL18 test set, the reduction in FPR is > 3; for hate speech detection, the FPR of our model is also reduced by 0.6 compared to the baseline model. We can also observe a 2.2 and 0.5 reduction in FPR for abusive speech and hate speech respectively when evaluating on BROD16 data. Table 4 reports results when we train the model on the DWMW17 dataset. Unlike Table 3 , unfortunately, our model fails to reduce the FPR rate for both offensive and hate speech of DWMW17 data. We also notice that our model trained with DWMW17 performs much worse than the model trained with FDCL18 data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 26, |
|
"text": "Sap et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 207, |
|
"text": "Sap et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 519, |
|
"text": "Sap et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 716, |
|
"end": 723, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1182, |
|
"end": 1189, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 1260, |
|
"end": 1267, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To understand the poor performance of our model when trained and evaluated on DWMW17 data, we investigated the data distribution in the test set and found that the vast majority of tweets Figure 1 : Accuracy of the entire development set of FDCL18 (top), and FPR rate for abusive (middle) and hate (bottom) speech detection for tweets inferred as AAE in the development set. X axis denotes the number of epochs. 0th epoch is the best checkpoint for pretraining step, which is also the baseline model. labeled as AAE by the dialect classifier were also annotated as toxic (97%). Thus, the subset of the data over which our model might improve FPR consists of merely < 3% of the AAE portion of the test set (49 tweets). In comparison, 70.98% of the tweets in the FDCL18 test set that were labeled as AAE were also annotated as toxic. Thus, we hypothesize that the performance of our model over the DWMW17 test set is not a representative estimate of how well our model reduces bias, because the improvable set in the DWMW17 is too small.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 196, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In Table 1 , we provide two examples of tweets that the baseline classifier misclassifies abusive/offensive, but our model, correctly classifies as non-toxic. Both examples are drawn from a toxicity dataset and are classified as AAE by the dialectal prediction model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Trade-off between FPR and Accuracy In order to better understand model performance, we explored the accuracy and FPR of our model throughout the entire training process. We evaluate the best checkpoint of the pre-trained model (0 th epoch) and checkpoints of each epoch during adversarial training and show the results in Figure 1 . While the baseline model (0 th epoch, before any adversarial training) achieves high accuracy, it also has a high FPR rate, particularly over abusive language. After adversarial training, the FPR rate decreases with only minor changes in accuracy. However, checkpoints with lower FPR rates also often have lower accuracy. While Tables 2 and 3 suggest that our model does achieve a balance between these metrics, Figure 1 shows the difficulty of this task; that is, it is difficult to disentangle these attributes completely. In Figure 2, we plot the validation accuracy of the adversary through the entire training process in order to verify that our model does learn a text representation at least partially free of dialectal information. Further, we compare using one adversary during training with using multiple adversaries (Kumar et al., 2019) . Through the course of training, the validation accuracy of AAE prediction decreases by about 6-10 and 2-5 points for both datasets, indicating that dialectal information is gradually removed from the encoded representation. However, after a certain training threshold (6 epochs for DWMW17 and 8 epochs for FDCL18), the accuracy of the classifier (not shown) also drops drastically, indicating that dialectal information cannot be completely eliminated from the text representation without also decreasing the accuracy of hatespeech classification. Multiple adversaries generally cause a greater decrease in AAE prediction than a single adversary, but do not necessarily lead to a lower FPR and a higher classification accuracy. We attribute this to the difference in experimental setups: in our settings, we focus on one attribute to demote, whereas Kumar et al. (2019) had to demote ten latent attributes and thus required multiple adversaries to stabilize the demotion model. Thus, unlike in (Kumar et al., 2019) , our settings do not require multiple adversaries, and indeed, we do not see improvements from using multiple adversaries.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1161, |
|
"end": 1181, |
|
"text": "(Kumar et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 2034, |
|
"end": 2053, |
|
"text": "Kumar et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 2178, |
|
"end": 2198, |
|
"text": "(Kumar et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 322, |
|
"end": 330, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 661, |
|
"end": 675, |
|
"text": "Tables 2 and 3", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 745, |
|
"end": 753, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 858, |
|
"end": 867, |
|
"text": "In Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4" |
|
}, |
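The probing measurement described above (adversary validation accuracy as a proxy for residual dialectal information) can be sketched as follows; dev_loader and the single-logit adversary are assumptions carried over from the earlier sketches, not the paper's code.

```python
# Sketch of the probe described above: the adversary's validation accuracy on
# the AAE label approximates how much dialectal information remains in the
# encoded representation. `dev_loader` is an assumption.
import torch


@torch.no_grad()
def adversary_dev_accuracy(encoder, adversary, dev_loader):
    correct, total = 0, 0
    for tokens, _y, z in dev_loader:
        probs = torch.sigmoid(adversary(encoder(tokens)).squeeze(-1))
        pred = (probs >= 0.5).long()
        correct += (pred == z.long()).sum().item()
        total += z.numel()
    return correct / max(total, 1)
```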
|
{ |
|
"text": "Preventing neural models from absorbing or even amplifying unwanted artifacts present in datasets is indispensable towards building machine learning systems without unwanted biases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "One thread of work focuses on removing bias at the data level, through reducing annotator bias (Sap et al., 2019) and augmenting imbalanced datasets (Jurgens et al., 2017) . Dixon et al. (2018) propose an unsupervised method based on balancing the training set and employing a proposed measurement for mitigating unintended bias in text classification models. Webster et al. (2018) present a gender-balanced dataset with ambiguous name-pair pronouns to provide diversity coverage for realworld data. In addition to annotator bias, sampling strategies also result in topic and author bias in datasets of abusive language detection, leading to decreased classification performance when testing in more realistic settings, necessitating the adoption of cross-domain evaluation for fairness (Wiegand et al., 2019) . A related thread of work on debiasing focuses at the model level (Zhao et al., 2019) . Adversarial training has been used to remove protected features from word embeddings (Xie et al., 2017; Zhang et al., 2018) and intermediate representations for both texts (Elazar and Goldberg, 2018; Zhang et al., 2018) and images (Edwards and Storkey, 2015; Wang et al., 2018) . Though previous works have documented that adversarial training fails to obliterate protected features, Kumar et al. (2019) show that using multiple adversaries more effectively forces the removal.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 113, |
|
"text": "(Sap et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 171, |
|
"text": "(Jurgens et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 174, |
|
"end": 193, |
|
"text": "Dixon et al. (2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 381, |
|
"text": "Webster et al. (2018)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 809, |
|
"text": "(Wiegand et al., 2019)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 877, |
|
"end": 896, |
|
"text": "(Zhao et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 984, |
|
"end": 1002, |
|
"text": "(Xie et al., 2017;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1003, |
|
"end": 1022, |
|
"text": "Zhang et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1071, |
|
"end": 1098, |
|
"text": "(Elazar and Goldberg, 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1099, |
|
"end": 1118, |
|
"text": "Zhang et al., 2018)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1130, |
|
"end": 1157, |
|
"text": "(Edwards and Storkey, 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1158, |
|
"end": 1176, |
|
"text": "Wang et al., 2018)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1283, |
|
"end": 1302, |
|
"text": "Kumar et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Along similar lines, multitask learning has been adopted for learning task-invariant representations. Vaidya et al. (2019) show that multitask training on a related task e.g., identity prediction, allows the model to shift focus to toxic-related elements in hate speech detection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 122, |
|
"text": "Vaidya et al. (2019)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this work, we use adversarial training to demote a protected attribute (AAE dialect) when training a classifier to predict a target attribute (toxicity). While we focus on AAE dialect and toxicity, our methodology readily generalizes to other settings, such as reducing bias related to age, gender, or income-level in any other text classification task. Overall, our approach has the potential to improve fairness and reduce bias in NLP models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We additionally did a simple check for abusive terms using a list of 20 hate speech words, randomly selected from Hatebase.org. We found that the percentage of sentences containing these words is much lower in AAE dataset (\u2248 2%) than hate speech datasets (\u2248 20%).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
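A minimal sketch of this lexicon check; the actual 20-word list sampled from Hatebase.org is not reproduced here, and the helper name is an assumption.

```python
# Minimal sketch of the footnote's check: fraction of tweets containing any
# term from a small hate-speech lexicon (the actual word list is not included).
def fraction_with_lexicon_terms(tweets, lexicon):
    lexicon = {w.lower() for w in lexicon}
    hits = sum(1 for tweet in tweets if lexicon & set(tweet.lower().split()))
    return hits / max(len(tweets), 1)
```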
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We gratefully thank anonymous reviewers, Maarten Sap, and Dallas Card for their help with this work. The second author of this work is supported by the NSF Graduate Research Fellowship Program under Grant No. DGE1745016. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the NSF. We also gratefully acknowledge Public Interest Technology University Network Grant No. NVF-PITU-Carnegie Mellon University-Subgrant-009246-2019-10-01 for supporting this research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "7" |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Data statements for natural language processing: Toward mitigating system bias and enabling better science", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Emily", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Batya", |
|
"middle": [], |
|
"last": "Bender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "587--604", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily M Bender and Batya Friedman. 2018. Data statements for natural language processing: Toward mitigating system bias and enabling better science. Transactions of the Association for Computational Linguistics, 6:587-604.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Demographic dialectal variation in social media: A case study of african-american english", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Su Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lisa", |
|
"middle": [], |
|
"last": "Blodgett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan O'", |
|
"middle": [], |
|
"last": "Green", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Connor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1119--1130", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Su Lin Blodgett, Lisa Green, and Brendan O'Connor. 2016. Demographic dialectal variation in social media: A case study of african-american english. In Proceedings of the 2016 Conference on Empiri- cal Methods in Natural Language Processing, pages 1119-1130.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Automated hate speech detection and the problem of offensive language", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Warmsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Macy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Eleventh international aaai conference on web and social media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Davidson, Dana Warmsley, Michael Macy, and Ingmar Weber. 2017. Automated hate speech detection and the problem of offensive language. In Eleventh international aaai conference on web and social media.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Measuring and mitigating unintended bias in text classification", |
|
"authors": [ |
|
{ |
|
"first": "Lucas", |
|
"middle": [], |
|
"last": "Dixon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Sorensen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nithum", |
|
"middle": [], |
|
"last": "Thain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Vasserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "67--73", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucas Dixon, John Li, Jeffrey Sorensen, Nithum Thain, and Lucy Vasserman. 2018. Measuring and mitigat- ing unintended bias in text classification. In Pro- ceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, pages 67-73.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Censoring representations with an adversary", |
|
"authors": [ |
|
{ |
|
"first": "Harrison", |
|
"middle": [], |
|
"last": "Edwards", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amos", |
|
"middle": [], |
|
"last": "Storkey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1511.05897" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Harrison Edwards and Amos Storkey. 2015. Censoring representations with an adversary. arXiv preprint arXiv:1511.05897.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Adversarial removal of demographic attributes from text data", |
|
"authors": [ |
|
{ |
|
"first": "Yanai", |
|
"middle": [], |
|
"last": "Elazar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--21", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanai Elazar and Yoav Goldberg. 2018. Adversarial removal of demographic attributes from text data. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 11- 21.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A survey on automatic detection of hate speech in text", |
|
"authors": [ |
|
{ |
|
"first": "Paula", |
|
"middle": [], |
|
"last": "Fortuna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S\u00e9rgio", |
|
"middle": [], |
|
"last": "Nunes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACM Computing Surveys (CSUR)", |
|
"volume": "51", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paula Fortuna and S\u00e9rgio Nunes. 2018. A survey on au- tomatic detection of hate speech in text. ACM Com- puting Surveys (CSUR), 51(4):85.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Large scale crowdsourcing and characterization of twitter abusive behavior", |
|
"authors": [ |
|
{ |
|
"first": "Constantinos", |
|
"middle": [], |
|
"last": "Antigoni Maria Founta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Despoina", |
|
"middle": [], |
|
"last": "Djouvas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilias", |
|
"middle": [], |
|
"last": "Chatzakou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Leontiadis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gianluca", |
|
"middle": [], |
|
"last": "Blackburn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Athena", |
|
"middle": [], |
|
"last": "Stringhini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Vakali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Sirivianos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kourtellis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Twelfth International AAAI Conference on Web and Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antigoni Maria Founta, Constantinos Djouvas, De- spoina Chatzakou, Ilias Leontiadis, Jeremy Black- burn, Gianluca Stringhini, Athena Vakali, Michael Sirivianos, and Nicolas Kourtellis. 2018. Large scale crowdsourcing and characterization of twitter abusive behavior. In Twelfth International AAAI Conference on Web and Social Media.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Equality of opportunity in supervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Moritz", |
|
"middle": [], |
|
"last": "Hardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Price", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Srebro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 30th International Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3323--3331", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moritz Hardt, Eric Price, and Nathan Srebro. 2016. Equality of opportunity in supervised learning. In Proceedings of the 30th International Conference on Neural Information Processing Systems, pages 3323-3331.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Incorporating dialectal variability for socially equitable language identification", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Jurgens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "51--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Jurgens, Yulia Tsvetkov, and Dan Jurafsky. 2017. Incorporating dialectal variability for socially equi- table language identification. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics (Volume 2: Short Papers), vol- ume 2, pages 51-57.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Topics to avoid: Demoting latent confounds in text classification", |
|
"authors": [ |
|
{ |
|
"first": "Sachin", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuly", |
|
"middle": [], |
|
"last": "Wintner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Noah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4144--4154", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sachin Kumar, Shuly Wintner, Noah A Smith, and Yu- lia Tsvetkov. 2019. Topics to avoid: Demoting la- tent confounds in text classification. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Interna- tional Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 4144-4154.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Multiple-attribute text rewriting", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandeep", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Denoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y-Lan", |
|
"middle": [], |
|
"last": "Boureau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Sandeep Subramanian, Eric Smith, Ludovic Denoyer, Marc'Aurelio Ranzato, and Y- Lan Boureau. 2019. Multiple-attribute text rewrit- ing. In International Conference on Learning Rep- resentations.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Discovering and controlling for latent confounds in text classification using adversarial domain adaptation", |
|
"authors": [ |
|
{ |
|
"first": "Virgile", |
|
"middle": [], |
|
"last": "Landeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tuan", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aron", |
|
"middle": [], |
|
"last": "Culotta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 SIAM International Conference on Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "298--305", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Virgile Landeiro, Tuan Tran, and Aron Culotta. 2019. Discovering and controlling for latent confounds in text classification using adversarial domain adapta- tion. In Proceedings of the 2019 SIAM International Conference on Data Mining, pages 298-305. SIAM.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Towards robust and privacy-preserving text representations", |
|
"authors": [ |
|
{ |
|
"first": "Yitong", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "25--30", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-2005" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yitong Li, Timothy Baldwin, and Trevor Cohn. 2018. Towards robust and privacy-preserving text represen- tations. In Proceedings of the 56th Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), pages 25-30, Melbourne, Australia. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Bias amplification in artificial intelligence systems", |
|
"authors": [ |
|
{ |
|
"first": "Kirsten", |
|
"middle": [], |
|
"last": "Lloyd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CoRR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kirsten Lloyd. 2018. Bias amplification in artificial in- telligence systems. CoRR, abs/1809.07842.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Spread of hate speech in online social media", |
|
"authors": [ |
|
{ |
|
"first": "Binny", |
|
"middle": [], |
|
"last": "Mathew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ritam", |
|
"middle": [], |
|
"last": "Dutt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pawan", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Animesh", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 10th ACM Conference on Web Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "173--182", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Binny Mathew, Ritam Dutt, Pawan Goyal, and Ani- mesh Mukherjee. 2019. Spread of hate speech in on- line social media. In Proceedings of the 10th ACM Conference on Web Science, pages 173-182. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A measurement study of hate speech in social media", |
|
"authors": [ |
|
{ |
|
"first": "Mainack", |
|
"middle": [], |
|
"last": "Mondal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leandro", |
|
"middle": [ |
|
"Ara\u00fajo" |
|
], |
|
"last": "Silva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabr\u00edcio", |
|
"middle": [], |
|
"last": "Benevenuto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 28th ACM Conference on Hypertext and Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "85--94", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mainack Mondal, Leandro Ara\u00fajo Silva, and Fabr\u00edcio Benevenuto. 2017. A measurement study of hate speech in social media. In Proceedings of the 28th ACM Conference on Hypertext and Social Media, pages 85-94. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "The risk of racial bias in hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "Sap", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dallas", |
|
"middle": [], |
|
"last": "Card", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saadia", |
|
"middle": [], |
|
"last": "Gabriel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1668--1678", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maarten Sap, Dallas Card, Saadia Gabriel, Yejin Choi, and Noah A Smith. 2019. The risk of racial bias in hate speech detection. In Proceedings of the 57th Annual Meeting of the Association for Compu- tational Linguistics, pages 1668-1678.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A survey on hate speech detection using natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Fifth International Workshop on Natural Language Processing for Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-1101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Schmidt and Michael Wiegand. 2017. A survey on hate speech detection using natural language pro- cessing. In Proceedings of the Fifth International Workshop on Natural Language Processing for So- cial Media, pages 1-10, Valencia, Spain. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Analyzing the targets of hate in online social media", |
|
"authors": [ |
|
{ |
|
"first": "Leandro", |
|
"middle": [], |
|
"last": "Silva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mainack", |
|
"middle": [], |
|
"last": "Mondal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denzil", |
|
"middle": [], |
|
"last": "Correa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabr\u00edcio", |
|
"middle": [], |
|
"last": "Benevenuto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Tenth International AAAI Conference on Web and Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leandro Silva, Mainack Mondal, Denzil Correa, Fabr\u00edcio Benevenuto, and Ingmar Weber. 2016. An- alyzing the targets of hate in online social media. In Tenth International AAAI Conference on Web and Social Media.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Empirical analysis of multi-task learning for reducing model bias in toxic comment detection", |
|
"authors": [ |
|
{ |
|
"first": "Ameya", |
|
"middle": [], |
|
"last": "Vaidya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feng", |
|
"middle": [], |
|
"last": "Mai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Ning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.09758" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ameya Vaidya, Feng Mai, and Yue Ning. 2019. Em- pirical analysis of multi-task learning for reducing model bias in toxic comment detection. arXiv preprint arXiv:1909.09758.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Adversarial removal of gender from deep image representations", |
|
"authors": [ |
|
{ |
|
"first": "Tianlu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1811.08489" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianlu Wang, Jieyu Zhao, Kai-Wei Chang, Mark Yatskar, and Vicente Ordonez. 2018. Adversarial re- moval of gender from deep image representations. arXiv preprint arXiv:1811.08489.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Are you a racist or am i seeing things? annotator influence on hate speech detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the first workshop on NLP and computational social science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "138--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeerak Waseem. 2016. Are you a racist or am i seeing things? annotator influence on hate speech detection on twitter. In Proceedings of the first workshop on NLP and computational social science, pages 138- 142.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Hateful symbols or hateful people? predictive features for hate speech detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the NAACL student research workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "88--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeerak Waseem and Dirk Hovy. 2016. Hateful sym- bols or hateful people? predictive features for hate speech detection on twitter. In Proceedings of the NAACL student research workshop, pages 88-93.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Mind the gap: A balanced corpus of gendered ambiguous pronouns", |
|
"authors": [ |
|
{ |
|
"first": "Kellie", |
|
"middle": [], |
|
"last": "Webster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Recasens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Axelrod", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "605--617", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kellie Webster, Marta Recasens, Vera Axelrod, and Ja- son Baldridge. 2018. Mind the gap: A balanced corpus of gendered ambiguous pronouns. Transac- tions of the Association for Computational Linguis- tics, 6:605-617.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Detection of abusive language: the problem of biased datasets", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Ruppenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Kleinbauer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "602--608", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Wiegand, Josef Ruppenhofer, and Thomas Kleinbauer. 2019. Detection of abusive language: the problem of biased datasets. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 602-608.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Controllable invariance through adversarial feature learning", |
|
"authors": [ |
|
{ |
|
"first": "Qizhe", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulun", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 31st International Conference on Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "585--596", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qizhe Xie, Zihang Dai, Yulun Du, Eduard Hovy, and Graham Neubig. 2017. Controllable invariance through adversarial feature learning. In Proceedings of the 31st International Conference on Neural Infor- mation Processing Systems, pages 585-596.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Mitigating unwanted biases with adversarial learning", |
|
"authors": [ |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Hu Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Blake", |
|
"middle": [], |
|
"last": "Lemoine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brian Hu Zhang, Blake Lemoine, and Margaret Mitchell. 2018. Mitigating unwanted biases with adversarial learning. In Proceedings of the 2018", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "AAAI/ACM Conference on AI, Ethics, and Society", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "335--340", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "AAAI/ACM Conference on AI, Ethics, and Society, pages 335-340.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Gender bias in contextualized word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianlu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Cotterell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "629--634", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Tianlu Wang, Mark Yatskar, Ryan Cot- terell, Vicente Ordonez, and Kai-Wei Chang. 2019. Gender bias in contextualized word embeddings. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 629-634.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Men also like shopping: Reducing gender bias amplification using corpus-level constraints", |
|
"authors": [ |
|
{ |
|
"first": "Jieyu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianlu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2979--2989", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jieyu Zhao, Tianlu Wang, Mark Yatskar, Vicente Or- donez, and Kai-Wei Chang. 2017. Men also like shopping: Reducing gender bias amplification using corpus-level constraints. In Proceedings of the 2017 Conference on Empirical Methods in Natural Lan- guage Processing, pages 2979-2989.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Validation accuracy on AAE prediction of the adversary in the whole training process. The green line denotes the training setting of one adversary and the orange line denotes the training setting of multiple adversaries.", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"text": "Example from Founta et al. (2018) and Blodgett et al." |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Accuracy</td><td>F1</td><td/></tr><tr><td>base</td><td>ours</td><td>base</td><td>ours</td></tr></table>", |
|
"text": "DWMW17 91.90 90.68 75.15 76.05 FDCL18 81.18 80.27 66.15 66.80" |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"3\">: Accuracy and F1 scores for detecting abu-</td></tr><tr><td colspan=\"3\">sive language. F1 values are macro-averaged across all</td></tr><tr><td colspan=\"3\">classification categories (e.g. hate, offensive, none for</td></tr><tr><td colspan=\"3\">DWMW17). Our model achieves an accuracy and F1</td></tr><tr><td colspan=\"2\">on par with the baseline model.</td></tr><tr><td/><td colspan=\"2\">Offensive</td><td>Hate</td></tr><tr><td/><td>base</td><td>ours base ours</td></tr><tr><td colspan=\"3\">FDCL18-AAE 20.94 17.69 3.23 2.60</td></tr><tr><td>BROD16</td><td colspan=\"2\">16.44 14.29 5.03 4.52</td></tr></table>", |
|
"text": "" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Offensive</td><td>Hate</td></tr><tr><td>base</td><td colspan=\"2\">ours base ours</td></tr><tr><td>DWMW17-</td><td/></tr></table>", |
|
"text": "AAE 38.27 42.59 0.70 2.06 BROD16 23.68 24.34 0.28 0.83" |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>: False positive rates (FPR), indicating how of-</td></tr><tr><td>ten AAE text is incorrectly classified as hateful or of-</td></tr><tr><td>fensive, when training with DWMW17 dataset. Our</td></tr><tr><td>model fails to improve FPR over the baseline, since</td></tr><tr><td>97% of AAE-labeled instances in the dataset are also</td></tr><tr><td>labeled as toxic.</td></tr><tr><td>are more likely to label non-abusive AAE text as</td></tr><tr><td>abusive. However, despite the risk of inaccurate</td></tr><tr><td>annotations, we can still use these datasets to eval-</td></tr><tr><td>uate racial bias in toxicity detection because of our</td></tr><tr><td>focus on FPR. In particular, to analyze false posi-</td></tr><tr><td>tives, we need to analyze the classifier's predictions</td></tr><tr><td>of the text as toxic, when annotators labeled it as</td></tr><tr><td>non-toxic.</td></tr></table>", |
|
"text": "" |
|
} |
|
} |
|
} |
|
} |